author     Tim Northover <tnorthover@apple.com>   2015-12-02 18:12:57 +0000
committer  Tim Northover <tnorthover@apple.com>   2015-12-02 18:12:57 +0000
commit     57b1a9599b8b3a3c571103ed481932c2768d8da9 (patch)
tree       e1b86a14e10e4591431ce62ac36a9caf82b791a9 /include
parent     454061bf3aeba2ac0b7b5dadb4f07e497139609f (diff)
AArch64: use ldxp/stxp pair to implement 128-bit atomic loads.
The ARM ARM is clear that 128-bit loads are only guaranteed to have been atomic if there has been a corresponding successful stxp. It's less clear for AArch32, so I'm leaving that alone for now.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@254524 91177308-0d34-0410-b5e6-96231b3b80d8
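The expansion the message describes amounts to a load-exclusive/store-exclusive retry loop. The following is a minimal sketch of that loop as C++ with GCC/Clang-style AArch64 inline assembly; the function name and signature are illustrative assumptions, not part of this patch.

#include <cstdint>

// Sketch only: read a 128-bit value atomically by pairing ldxp with stxp.
// The value produced by ldxp is only known to be single-copy atomic once the
// paired stxp reports success, so the loop retries until it does.
void atomic_load_128(volatile void *addr, uint64_t &lo, uint64_t &hi) {
  uint32_t failed;
  do {
    asm volatile("ldxp %0, %1, [%3]\n\t"   // load-exclusive both 64-bit halves
                 "stxp %w2, %0, %1, [%3]"  // store them back exclusively
                 : "=&r"(lo), "=&r"(hi), "=&r"(failed)
                 : "r"(addr)
                 : "memory");
  } while (failed);                        // stxp failed: another agent intervened, retry
}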
Diffstat (limited to 'include')
-rw-r--r--  include/llvm/Target/TargetLowering.h  10
1 file changed, 6 insertions, 4 deletions
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index cb5a5796e98..819458dbb0f 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -130,10 +130,12 @@ public:
/// support for these atomic instructions, and also have different options
/// w.r.t. what they should expand to.
enum class AtomicExpansionKind {
- None, // Don't expand the instruction.
- LLSC, // Expand the instruction into loadlinked/storeconditional; used
- // by ARM/AArch64.
- CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
+ None, // Don't expand the instruction.
+ LLSC, // Expand the instruction into loadlinked/storeconditional; used
+ // by ARM/AArch64.
+ LLOnly, // Expand the (load) instruction into just a load-linked, which has
+ // greater atomic guarantees than a normal load.
+ CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
};
static ISD::NodeType getExtendForContent(BooleanContent Content) {
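For orientation, here is a hedged sketch of how a target-specific hook (e.g. an override of shouldExpandAtomicLoadInIR) could map atomic load widths onto these kinds; the helper name, the size cutoffs, and the 64-bit LLOnly case are illustrative assumptions, not the contents of this commit.

#include "llvm/IR/Instructions.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

// Illustrative only: pick an expansion strategy per atomic load width.
static TargetLowering::AtomicExpansionKind
chooseAtomicLoadExpansion(const LoadInst *LI) {
  unsigned Size = LI->getType()->getPrimitiveSizeInBits();
  if (Size == 128)
    return TargetLowering::AtomicExpansionKind::LLSC;   // full ll/sc retry loop
  if (Size == 64)
    return TargetLowering::AtomicExpansionKind::LLOnly; // assume a bare load-linked suffices
  return TargetLowering::AtomicExpansionKind::None;     // a plain load is already atomic
}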