author    Daniel Sanders <daniel_l_sanders@apple.com>    2017-11-28 22:07:05 +0000
committer Daniel Sanders <daniel_l_sanders@apple.com>    2017-11-28 22:07:05 +0000
commit    7f3e660b838376ca17c7902d12d5d5d305932616 (patch)
tree      73f70204c5d4b9275f9491f1d8eec6f665e759b1
parent    ee2e970651e392df519e2173b98b916b90a64b04 (diff)
[globalisel][tablegen] Add support for importing G_ATOMIC_CMPXCHG, G_ATOMICRMW_* rules from SelectionDAG.
GIM_CheckNonAtomic has been replaced by GIM_CheckAtomicOrdering so that it can support a wider range of orderings. This is then used to import patterns using nodes such as atomic_cmp_swap, atomic_swap, and atomic_load_*.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@319232 91177308-0d34-0410-b5e6-96231b3b80d8
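The practical effect on the generated match tables is visible in the GlobalISelEmitter.td test updated below: the old single-operand check

    GIM_CheckNonAtomic, /*MI*/0,

becomes a check that takes the required ordering as an explicit operand

    GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(int64_t)AtomicOrdering::NotAtomic,

and rules imported from atomic nodes emit the same opcode with whichever ordering their predicate names (Monotonic, Acquire, Release, AcquireRelease, or SequentiallyConsistent).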
-rw-r--r-- include/llvm/CodeGen/GlobalISel/InstructionSelector.h     |   5
-rw-r--r-- include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h |  13
-rw-r--r-- include/llvm/Target/GlobalISel/SelectionDAGCompat.td      |  13
-rw-r--r-- test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir      | 238
-rw-r--r-- test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir        |  53
-rw-r--r-- test/TableGen/GlobalISelEmitter.td                        |   4
-rw-r--r-- utils/TableGen/GlobalISelEmitter.cpp                      | 120
7 files changed, 396 insertions(+), 50 deletions(-)
diff --git a/include/llvm/CodeGen/GlobalISel/InstructionSelector.h b/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
index 550e45a4be2..726a702d96b 100644
--- a/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
+++ b/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
@@ -111,9 +111,10 @@ enum {
/// - InsnID - Instruction ID
/// - The predicate to test
GIM_CheckAPFloatImmPredicate,
- /// Check a memory operation is non-atomic.
+ /// Check a memory operation has the specified atomic ordering.
/// - InsnID - Instruction ID
- GIM_CheckNonAtomic,
+ /// - Ordering - The AtomicOrdering value
+ GIM_CheckAtomicOrdering,
/// Check the type for the specified operand
/// - InsnID - Instruction ID
diff --git a/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h b/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
index bf2cf734efe..2704dc25c0e 100644
--- a/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
+++ b/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
@@ -226,27 +226,24 @@ bool InstructionSelector::executeMatchTable(
return false;
break;
}
- case GIM_CheckNonAtomic: {
+ case GIM_CheckAtomicOrdering: {
int64_t InsnID = MatchTable[CurrentIdx++];
+ AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
- dbgs() << CurrentIdx << ": GIM_CheckNonAtomic(MIs["
- << InsnID << "])\n");
+ dbgs() << CurrentIdx << ": GIM_CheckAtomicOrdering(MIs["
+ << InsnID << "], " << (uint64_t)Ordering << ")\n");
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- assert((State.MIs[InsnID]->getOpcode() == TargetOpcode::G_LOAD ||
- State.MIs[InsnID]->getOpcode() == TargetOpcode::G_STORE) &&
- "Expected G_LOAD/G_STORE");
if (!State.MIs[InsnID]->hasOneMemOperand())
if (handleReject() == RejectAndGiveUp)
return false;
for (const auto &MMO : State.MIs[InsnID]->memoperands())
- if (MMO->getOrdering() != AtomicOrdering::NotAtomic)
+ if (MMO->getOrdering() != Ordering)
if (handleReject() == RejectAndGiveUp)
return false;
break;
}
-
case GIM_CheckType: {
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t OpIdx = MatchTable[CurrentIdx++];
diff --git a/include/llvm/Target/GlobalISel/SelectionDAGCompat.td b/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
index c012b20fd7b..575f228cd77 100644
--- a/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
+++ b/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
@@ -94,6 +94,19 @@ def : GINodeEquiv<G_LOAD, ld> { let CheckMMOIsNonAtomic = 1; }
// G_STORE with a non-atomic MachineMemOperand.
def : GINodeEquiv<G_STORE, st> { let CheckMMOIsNonAtomic = 1; }
+def : GINodeEquiv<G_ATOMIC_CMPXCHG, atomic_cmp_swap>;
+def : GINodeEquiv<G_ATOMICRMW_XCHG, atomic_swap>;
+def : GINodeEquiv<G_ATOMICRMW_ADD, atomic_load_add>;
+def : GINodeEquiv<G_ATOMICRMW_SUB, atomic_load_sub>;
+def : GINodeEquiv<G_ATOMICRMW_AND, atomic_load_and>;
+def : GINodeEquiv<G_ATOMICRMW_NAND, atomic_load_nand>;
+def : GINodeEquiv<G_ATOMICRMW_OR, atomic_load_or>;
+def : GINodeEquiv<G_ATOMICRMW_XOR, atomic_load_xor>;
+def : GINodeEquiv<G_ATOMICRMW_MIN, atomic_load_min>;
+def : GINodeEquiv<G_ATOMICRMW_MAX, atomic_load_max>;
+def : GINodeEquiv<G_ATOMICRMW_UMIN, atomic_load_umin>;
+def : GINodeEquiv<G_ATOMICRMW_UMAX, atomic_load_umax>;
+
// Specifies the GlobalISel equivalents for SelectionDAG's ComplexPattern.
// Should be used on defs that subclass GIComplexOperandMatcher<>.
class GIComplexPatternEquiv<ComplexPattern seldag> {
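With these equivalences registered, SelectionDAG patterns written against the atomic nodes become importable as-is. As a minimal sketch (illustrative only; AArch64's actual LSE patterns are generated from multiclasses rather than spelled out like this), a pattern of the form

    def : Pat<(atomic_swap_64 GPR64sp:$addr, GPR64:$val),
              (SWPX GPR64:$val, GPR64sp:$addr)>;

would now be imported and matched against G_ATOMICRMW_XCHG, which is what the new select-atomicrmw.mir test below exercises.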
diff --git a/test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir b/test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir
new file mode 100644
index 00000000000..cab5489ab6f
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir
@@ -0,0 +1,238 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=aarch64-- -mattr=+lse -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+ define void @atomicrmw_xchg_i64(i64* %addr) { ret void }
+ define void @atomicrmw_add_i64(i64* %addr) { ret void }
+ define void @atomicrmw_add_i32(i64* %addr) { ret void }
+ define void @atomicrmw_sub_i32(i64* %addr) { ret void }
+ define void @atomicrmw_and_i32(i64* %addr) { ret void }
+ ; nand isn't legal
+ define void @atomicrmw_or_i32(i64* %addr) { ret void }
+ define void @atomicrmw_xor_i32(i64* %addr) { ret void }
+ define void @atomicrmw_min_i32(i64* %addr) { ret void }
+ define void @atomicrmw_max_i32(i64* %addr) { ret void }
+ define void @atomicrmw_umin_i32(i64* %addr) { ret void }
+ define void @atomicrmw_umax_i32(i64* %addr) { ret void }
+...
+
+---
+name: atomicrmw_xchg_i64
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_xchg_i64
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr64 = MOVi64imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr64 = SWPX [[CST]], [[COPY]] :: (load store monotonic 8 on %ir.addr)
+ ; CHECK: %x0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s64) = G_CONSTANT i64 1
+ %2:gpr(s64) = G_ATOMICRMW_XCHG %0, %1 :: (load store monotonic 8 on %ir.addr)
+ %x0 = COPY %2(s64)
+...
+---
+name: atomicrmw_add_i64
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_add_i64
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr64 = MOVi64imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr64 = LDADDX [[CST]], [[COPY]] :: (load store monotonic 8 on %ir.addr)
+ ; CHECK: %x0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s64) = G_CONSTANT i64 1
+ %2:gpr(s64) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic 8 on %ir.addr)
+ %x0 = COPY %2(s64)
+...
+---
+name: atomicrmw_add_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_add_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDADDALW [[CST]], [[COPY]] :: (load store seq_cst 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store seq_cst 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
+
+---
+name: atomicrmw_sub_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_sub_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDADDALW [[CST]], [[COPY]] :: (load store seq_cst 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store seq_cst 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
+
+---
+name: atomicrmw_and_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_and_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[CST2:%[0-9]+]]:gpr32 = ORNWrr %wzr, [[CST]]
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDCLRAW [[CST2]], [[COPY]] :: (load store acquire 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_AND %0, %1 :: (load store acquire 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
+
+---
+name: atomicrmw_or_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_or_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDSETLW [[CST]], [[COPY]] :: (load store release 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_OR %0, %1 :: (load store release 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
+
+---
+name: atomicrmw_xor_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_xor_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDEORALW [[CST]], [[COPY]] :: (load store acq_rel 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_XOR %0, %1 :: (load store acq_rel 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
+
+---
+name: atomicrmw_min_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_min_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDSMINALW [[CST]], [[COPY]] :: (load store acq_rel 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_MIN %0, %1 :: (load store acq_rel 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
+
+---
+name: atomicrmw_max_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_max_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDSMAXALW [[CST]], [[COPY]] :: (load store acq_rel 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_MAX %0, %1 :: (load store acq_rel 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
+
+---
+name: atomicrmw_umin_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_umin_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDUMINALW [[CST]], [[COPY]] :: (load store acq_rel 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_UMIN %0, %1 :: (load store acq_rel 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
+
+---
+name: atomicrmw_umax_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_umax_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDUMAXALW [[CST]], [[COPY]] :: (load store acq_rel 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_UMAX %0, %1 :: (load store acq_rel 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir b/test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir
new file mode 100644
index 00000000000..67ce28ba859
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir
@@ -0,0 +1,53 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=aarch64-- -mattr=+lse -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+ define void @cmpxchg_i32(i64* %addr) { ret void }
+ define void @cmpxchg_i64(i64* %addr) { ret void }
+...
+
+---
+name: cmpxchg_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: cmpxchg_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CMP:%[0-9]+]]:gpr32 = MOVi32imm 0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = CASW [[CMP]], [[CST]], [[COPY]] :: (load store monotonic 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 0
+ %2:gpr(s32) = G_CONSTANT i32 1
+ %3:gpr(s32) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic 8 on %ir.addr)
+ %w0 = COPY %3(s32)
+...
+
+---
+name: cmpxchg_i64
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: cmpxchg_i64
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CMP:%[0-9]+]]:gpr64 = MOVi64imm 0
+ ; CHECK: [[CST:%[0-9]+]]:gpr64 = MOVi64imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr64 = CASX [[CMP]], [[CST]], [[COPY]] :: (load store monotonic 8 on %ir.addr)
+ ; CHECK: %x0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s64) = G_CONSTANT i64 0
+ %2:gpr(s64) = G_CONSTANT i64 1
+ %3:gpr(s64) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic 8 on %ir.addr)
+ %x0 = COPY %3(s64)
+...
diff --git a/test/TableGen/GlobalISelEmitter.td b/test/TableGen/GlobalISelEmitter.td
index 296946fa52a..bdfda0a3be9 100644
--- a/test/TableGen/GlobalISelEmitter.td
+++ b/test/TableGen/GlobalISelEmitter.td
@@ -832,7 +832,7 @@ def MOVfpimmz : I<(outs FPR32:$dst), (ins f32imm:$imm), [(set FPR32:$dst, fpimmz
// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 22*/ [[LABEL:[0-9]+]],
// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2,
// CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_LOAD,
-// CHECK-NEXT: GIM_CheckNonAtomic, /*MI*/0,
+// CHECK-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(int64_t)AtomicOrdering::NotAtomic,
// CHECK-NEXT: // MIs[0] dst
// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/0, /*Type*/GILLT_s32,
// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/0, /*RC*/MyTarget::GPR32RegClassID,
@@ -861,7 +861,7 @@ def LOAD : I<(outs GPR32:$dst), (ins GPR32:$src1),
// CHECK-NEXT: // MIs[0] Operand 1
// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/1, /*Type*/GILLT_s16,
// CHECK-NEXT: GIM_CheckOpcode, /*MI*/1, TargetOpcode::G_LOAD,
-// CHECK-NEXT: GIM_CheckNonAtomic, /*MI*/1,
+// CHECK-NEXT: GIM_CheckAtomicOrdering, /*MI*/1, /*Order*/(int64_t)AtomicOrdering::NotAtomic,
// CHECK-NEXT: // MIs[1] Operand 0
// CHECK-NEXT: GIM_CheckType, /*MI*/1, /*Op*/0, /*Type*/GILLT_s16,
// CHECK-NEXT: // MIs[1] src1
diff --git a/utils/TableGen/GlobalISelEmitter.cpp b/utils/TableGen/GlobalISelEmitter.cpp
index 505864bb0d5..f82b2fd3e9b 100644
--- a/utils/TableGen/GlobalISelEmitter.cpp
+++ b/utils/TableGen/GlobalISelEmitter.cpp
@@ -191,6 +191,8 @@ static std::string explainPredicates(const TreePatternNode *N) {
for (const auto &P : N->getPredicateFns()) {
Explanation +=
(Separator + P.getOrigPatFragRecord()->getRecord()->getName()).str();
+ Separator = ", ";
+
if (P.isAlwaysTrue())
Explanation += " always-true";
if (P.isImmediatePattern())
@@ -217,6 +219,17 @@ static std::string explainPredicates(const TreePatternNode *N) {
Explanation += (" MemVT=" + VT->getName()).str();
if (Record *VT = P.getScalarMemoryVT())
Explanation += (" ScalarVT(MemVT)=" + VT->getName()).str();
+
+ if (P.isAtomicOrderingMonotonic())
+ Explanation += " monotonic";
+ if (P.isAtomicOrderingAcquire())
+ Explanation += " acquire";
+ if (P.isAtomicOrderingRelease())
+ Explanation += " release";
+ if (P.isAtomicOrderingAcquireRelease())
+ Explanation += " acq_rel";
+ if (P.isAtomicOrderingSequentiallyConsistent())
+ Explanation += " seq_cst";
}
return Explanation;
}
@@ -253,16 +266,26 @@ static Error isTrivialOperatorNode(const TreePatternNode *N) {
if (Predicate.isImmediatePattern())
continue;
- if (Predicate.isLoad() && Predicate.isUnindexed())
+ if (Predicate.isNonExtLoad())
continue;
- if (Predicate.isNonExtLoad())
+ if (Predicate.isNonTruncStore())
continue;
- if (Predicate.isStore() && Predicate.isUnindexed())
+ if (Predicate.isLoad() || Predicate.isStore()) {
+ if (Predicate.isUnindexed())
+ continue;
+ }
+
+ if (Predicate.isAtomic() && Predicate.getMemoryVT())
continue;
- if (Predicate.isNonTruncStore())
+ if (Predicate.isAtomic() &&
+ (Predicate.isAtomicOrderingMonotonic() ||
+ Predicate.isAtomicOrderingAcquire() ||
+ Predicate.isAtomicOrderingRelease() ||
+ Predicate.isAtomicOrderingAcquireRelease() ||
+ Predicate.isAtomicOrderingSequentiallyConsistent()))
continue;
HasUnsupportedPredicate = true;
@@ -1172,7 +1195,7 @@ protected:
enum PredicateKind {
IPM_Opcode,
IPM_ImmPredicate,
- IPM_NonAtomicMMO,
+ IPM_AtomicOrderingMMO,
};
PredicateKind Kind;
@@ -1301,20 +1324,25 @@ public:
}
};
-/// Generates code to check that a memory instruction has a non-atomic MachineMemoryOperand.
-class NonAtomicMMOPredicateMatcher : public InstructionPredicateMatcher {
+/// Generates code to check that a memory instruction has an atomic ordering
+/// on its MachineMemOperand.
+class AtomicOrderingMMOPredicateMatcher : public InstructionPredicateMatcher {
+ StringRef Order;
+
public:
- NonAtomicMMOPredicateMatcher()
- : InstructionPredicateMatcher(IPM_NonAtomicMMO) {}
+ AtomicOrderingMMOPredicateMatcher(StringRef Order)
+ : InstructionPredicateMatcher(IPM_AtomicOrderingMMO), Order(Order) {}
static bool classof(const InstructionPredicateMatcher *P) {
- return P->getKind() == IPM_NonAtomicMMO;
+ return P->getKind() == IPM_AtomicOrderingMMO;
}
void emitPredicateOpcodes(MatchTable &Table, RuleMatcher &Rule,
unsigned InsnVarID) const override {
- Table << MatchTable::Opcode("GIM_CheckNonAtomic")
+ Table << MatchTable::Opcode("GIM_CheckAtomicOrdering")
<< MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID)
+ << MatchTable::Comment("Order")
+ << MatchTable::NamedValue(("(int64_t)AtomicOrdering::" + Order).str())
<< MatchTable::LineBreak;
}
};
@@ -2474,49 +2502,65 @@ Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
continue;
}
- // No check required. A G_LOAD is an unindexed load.
- if (Predicate.isLoad() && Predicate.isUnindexed())
- continue;
-
// No check required. G_LOAD by itself is a non-extending load.
if (Predicate.isNonExtLoad())
continue;
- if (Predicate.isLoad() && Predicate.getMemoryVT() != nullptr) {
- Optional<LLTCodeGen> MemTyOrNone =
- MVTToLLT(getValueType(Predicate.getMemoryVT()));
-
- if (!MemTyOrNone)
- return failedImport("MemVT could not be converted to LLT");
-
- InsnMatcher.getOperand(0).addPredicate<LLTOperandMatcher>(MemTyOrNone.getValue());
- continue;
- }
-
- // No check required. A G_STORE is an unindexed store.
- if (Predicate.isStore() && Predicate.isUnindexed())
- continue;
-
// No check required. G_STORE by itself is a non-extending store.
if (Predicate.isNonTruncStore())
continue;
- if (Predicate.isStore() && Predicate.getMemoryVT() != nullptr) {
- Optional<LLTCodeGen> MemTyOrNone =
- MVTToLLT(getValueType(Predicate.getMemoryVT()));
+ if (Predicate.isLoad() || Predicate.isStore() || Predicate.isAtomic()) {
+ if (Predicate.getMemoryVT() != nullptr) {
+ Optional<LLTCodeGen> MemTyOrNone =
+ MVTToLLT(getValueType(Predicate.getMemoryVT()));
- if (!MemTyOrNone)
- return failedImport("MemVT could not be converted to LLT");
+ if (!MemTyOrNone)
+ return failedImport("MemVT could not be converted to LLT");
- InsnMatcher.getOperand(0).addPredicate<LLTOperandMatcher>(MemTyOrNone.getValue());
- continue;
+ InsnMatcher.getOperand(0).addPredicate<LLTOperandMatcher>(
+ MemTyOrNone.getValue());
+ continue;
+ }
+ }
+
+ if (Predicate.isLoad() || Predicate.isStore()) {
+ // No check required. A G_LOAD/G_STORE is an unindexed load/store.
+ if (Predicate.isUnindexed())
+ continue;
+ }
+
+ if (Predicate.isAtomic()) {
+ if (Predicate.isAtomicOrderingMonotonic()) {
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
+ "Monotonic");
+ continue;
+ }
+ if (Predicate.isAtomicOrderingAcquire()) {
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>("Acquire");
+ continue;
+ }
+ if (Predicate.isAtomicOrderingRelease()) {
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>("Release");
+ continue;
+ }
+ if (Predicate.isAtomicOrderingAcquireRelease()) {
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
+ "AcquireRelease");
+ continue;
+ }
+ if (Predicate.isAtomicOrderingSequentiallyConsistent()) {
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
+ "SequentiallyConsistent");
+ continue;
+ }
}
return failedImport("Src pattern child has predicate (" +
explainPredicates(Src) + ")");
}
if (SrcGIEquivOrNull && SrcGIEquivOrNull->getValueAsBit("CheckMMOIsNonAtomic"))
- InsnMatcher.addPredicate<NonAtomicMMOPredicateMatcher>();
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>("NotAtomic");
if (Src->isLeaf()) {
Init *SrcInit = Src->getLeafValue();
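Putting the emitter pieces together: for a pattern guarded by an acquire-ordering predicate, AtomicOrderingMMOPredicateMatcher::emitPredicateOpcodes produces a table entry along these lines (a sketch; the surrounding operand checks are elided, and the G_ATOMICRMW_ADD opcode is only an example):

    GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_ATOMICRMW_ADD,
    GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(int64_t)AtomicOrdering::Acquire,

At selection time, executeMatchTable (see the InstructionSelectorImpl.h hunk above) reads the instruction ID and ordering operands and rejects the rule unless every MachineMemOperand on the candidate instruction carries exactly that ordering.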