author     Daniel Sanders <daniel_l_sanders@apple.com>   2017-12-04 20:39:32 +0000
committer  Daniel Sanders <daniel_l_sanders@apple.com>   2017-12-04 20:39:32 +0000
commit     e35711676bd03eb1caf7a9cb41179d82121c0457 (patch)
tree       7f503474d78f64753f5e7b7775e0a3753748a0f6 /test/CodeGen/AArch64
parent     07f4dc367b2fe397d356114da57ed58556ad231c (diff)
[globalisel][tablegen] Split atomic load/store into separate opcode and enable for AArch64.
This patch splits atomics out of the generic G_LOAD/G_STORE and into their own G_ATOMIC_LOAD/G_ATOMIC_STORE. This is a pragmatic decision rather than a necessary one. Atomic load/store has little implementation in common with non-atomic load/store, and the two tend to be handled very differently throughout the backend. It also has the nice side-effect of slightly improving the common-case performance at ISel, since there's no longer a need for an atomicity check in the matcher table.

All targets have been updated to remove the atomic load/store check from the G_LOAD/G_STORE path. AArch64 has also been updated to mark G_ATOMIC_LOAD/G_ATOMIC_STORE legal.

There is one issue with this patch, though, which also affects the extending loads and truncating stores: the rules only match when an appropriate G_ANYEXT is present in the MIR. For example,
  (G_ATOMIC_STORE (G_TRUNC:s16 (G_ANYEXT:s32 (G_ATOMIC_LOAD:s16 X))))
will match, but
  (G_ATOMIC_STORE (G_ATOMIC_LOAD:s16 X))
will not. This shouldn't be a problem at the moment, but as we get better at eliminating extends/truncates we'll likely start failing to match in some cases. The current plan is to fix this in a patch that changes the representation of extending-load/truncating-store to allow the MMO to describe a different type to the operation.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@319691 91177308-0d34-0410-b5e6-96231b3b80d8
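As a rough sketch of the new representation (the function name, virtual-register numbers, and MMO text below are illustrative only, modelled on the irtranslator test updated in this patch), an atomic load/store pair in IR is now expected to come out of the IRTranslator as the dedicated opcodes rather than as G_LOAD/G_STORE carrying an atomic memory operand:

define i16 @atomic_roundtrip(i16* %addr) {
  ; Expected generic MIR after the IRTranslator, roughly:
  ;   %0:_(p0) = COPY %x0
  ;   %1:_(s16) = G_ATOMIC_LOAD %0(p0) :: (load acquire 2 from %ir.addr)
  ;   G_ATOMIC_STORE %1(s16), %0(p0) :: (store release 2 into %ir.addr)
  %val = load atomic i16, i16* %addr acquire, align 2
  store atomic i16 %val, i16* %addr release, align 2
  ret i16 %val
}

Because this patch also marks G_ATOMIC_LOAD/G_ATOMIC_STORE legal for AArch64, code like this no longer needs the fallback check exercised by the atomic_ops test, which is why that test is removed from arm64-fallback.ll in the diff below.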
Diffstat (limited to 'test/CodeGen/AArch64')
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll      20
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll  12
2 files changed, 11 insertions, 21 deletions
diff --git a/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll b/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
index aa81c3aff89..afa003c4dd9 100644
--- a/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
+++ b/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
@@ -40,10 +40,10 @@ define [1 x double] @constant() {
ret [1 x double] [double 1.0]
}
- ; The key problem here is that we may fail to create an MBB referenced by a
- ; PHI. If so, we cannot complete the G_PHI and mustn't try or bad things
- ; happen.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: G_STORE %6, %2; mem:ST4[%addr] GPR:%6,%2 (in function: pending_phis)
+; The key problem here is that we may fail to create an MBB referenced by a
+; PHI. If so, we cannot complete the G_PHI and mustn't try or bad things
+; happen.
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate constant: [1 x double] (in function: pending_phis)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for pending_phis
; FALLBACK-WITH-REPORT-OUT-LABEL: pending_phis:
define i32 @pending_phis(i1 %tst, i32 %val, i32* %addr) {
@@ -54,7 +54,7 @@ end:
ret i32 %res
true:
- store atomic i32 42, i32* %addr seq_cst, align 4
+ %t = extractvalue [1 x double] [double 1.0], 0
br label %end
false:
@@ -90,16 +90,6 @@ define i128 @sequence_sizes([8 x i8] %in) {
ret i128 undef
}
-; Just to make sure we don't accidentally emit a normal load/store.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: %2<def>(s64) = G_LOAD %0; mem:LD8[%addr] GPR:%2,%0 (in function: atomic_ops)
-; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for atomic_ops
-; FALLBACK-WITH-REPORT-LABEL: atomic_ops:
-define i64 @atomic_ops(i64* %addr) {
- store atomic i64 0, i64* %addr unordered, align 8
- %res = load atomic i64, i64* %addr seq_cst, align 8
- ret i64 %res
-}
-
; Make sure we don't mess up metadata arguments.
declare void @llvm.write_register.i64(metadata, i64)
diff --git a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
index e7868327975..62203213a78 100644
--- a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
+++ b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
@@ -1332,12 +1332,12 @@ define void @test_lifetime_intrin() {
define void @test_load_store_atomics(i8* %addr) {
; CHECK-LABEL: name: test_load_store_atomics
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x0
-; CHECK: [[V0:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load unordered 1 from %ir.addr)
-; CHECK: G_STORE [[V0]](s8), [[ADDR]](p0) :: (store monotonic 1 into %ir.addr)
-; CHECK: [[V1:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load acquire 1 from %ir.addr)
-; CHECK: G_STORE [[V1]](s8), [[ADDR]](p0) :: (store release 1 into %ir.addr)
-; CHECK: [[V2:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load syncscope("singlethread") seq_cst 1 from %ir.addr)
-; CHECK: G_STORE [[V2]](s8), [[ADDR]](p0) :: (store syncscope("singlethread") monotonic 1 into %ir.addr)
+; CHECK: [[V0:%[0-9]+]]:_(s8) = G_ATOMIC_LOAD [[ADDR]](p0) :: (load unordered 1 from %ir.addr)
+; CHECK: G_ATOMIC_STORE [[V0]](s8), [[ADDR]](p0) :: (store monotonic 1 into %ir.addr)
+; CHECK: [[V1:%[0-9]+]]:_(s8) = G_ATOMIC_LOAD [[ADDR]](p0) :: (load acquire 1 from %ir.addr)
+; CHECK: G_ATOMIC_STORE [[V1]](s8), [[ADDR]](p0) :: (store release 1 into %ir.addr)
+; CHECK: [[V2:%[0-9]+]]:_(s8) = G_ATOMIC_LOAD [[ADDR]](p0) :: (load syncscope("singlethread") seq_cst 1 from %ir.addr)
+; CHECK: G_ATOMIC_STORE [[V2]](s8), [[ADDR]](p0) :: (store syncscope("singlethread") monotonic 1 into %ir.addr)
%v0 = load atomic i8, i8* %addr unordered, align 1
store atomic i8 %v0, i8* %addr monotonic, align 1