author     Tim Northover <tnorthover@apple.com>    2013-07-01 14:48:48 +0000
committer  Tim Northover <tnorthover@apple.com>    2013-07-01 14:48:48 +0000
commit     d59fc0af0a3ebd13c7004511e64e3233dfe87b17 (patch)
tree       1cc4dbfa52c9735cb04654858ebc786c87a15509 /test/CodeGen/ARM/atomic-load-store.ll
parent     728af3d574895dd9e4bb5c418c7398297c4f39fe (diff)
ARM: relax the atomic release barrier to "dmb ishst"
I believe the full "dmb ish" barrier is not required to guarantee release
semantics for atomic operations. The weaker "dmb ishst" prevents previous
operations being reordered with a store executed afterwards, which is enough.

A key point to note (fortunately already correct) is that this barrier alone
is *insufficient* for sequential consistency, no matter how liberally placed.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@185339 91177308-0d34-0410-b5e6-96231b3b80d8
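For reference, the post-patch ARMv7 lowering can be reproduced with a
standalone test like the sketch below. The RUN line, triple, and function
names are illustrative assumptions, not taken from this commit:

    ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi | FileCheck %s

    ; A release store only has to keep earlier memory accesses from being
    ; reordered past the store itself, so a store-only barrier suffices.
    define void @sketch_release(i32* %p, i32 %v) {
    ; CHECK: sketch_release:
    ; CHECK: dmb ishst
    ; CHECK-NEXT: str
      store atomic i32 %v, i32* %p release, align 4
      ret void
    }

    ; A seq_cst store additionally needs a trailing *full* barrier so the
    ; store cannot be reordered with a later load; "dmb ishst" cannot
    ; provide that, which is why only the leading barrier is relaxed.
    define void @sketch_seq_cst_store(i32* %p, i32 %v) {
    ; CHECK: sketch_seq_cst_store:
    ; CHECK: dmb ishst
    ; CHECK-NEXT: str
    ; CHECK-NEXT: dmb {{ish$}}
      store atomic i32 %v, i32* %p seq_cst, align 4
      ret void
    }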
Diffstat (limited to 'test/CodeGen/ARM/atomic-load-store.ll')
-rw-r--r--  test/CodeGen/ARM/atomic-load-store.ll  |  46
1 file changed, 40 insertions(+), 6 deletions(-)
diff --git a/test/CodeGen/ARM/atomic-load-store.ll b/test/CodeGen/ARM/atomic-load-store.ll
index 66916a7c2e2..7ae71298a33 100644
--- a/test/CodeGen/ARM/atomic-load-store.ll
+++ b/test/CodeGen/ARM/atomic-load-store.ll
@@ -6,15 +6,15 @@
define void @test1(i32* %ptr, i32 %val1) {
; ARM: test1
-; ARM: dmb ish
+; ARM: dmb ishst
; ARM-NEXT: str
-; ARM-NEXT: dmb ish
+; ARM-NEXT: dmb {{ish$}}
; THUMBONE: test1
; THUMBONE: __sync_lock_test_and_set_4
; THUMBTWO: test1
-; THUMBTWO: dmb ish
+; THUMBTWO: dmb ishst
; THUMBTWO-NEXT: str
-; THUMBTWO-NEXT: dmb ish
+; THUMBTWO-NEXT: dmb {{ish$}}
store atomic i32 %val1, i32* %ptr seq_cst, align 4
ret void
}
@@ -22,12 +22,12 @@ define void @test1(i32* %ptr, i32 %val1) {
define i32 @test2(i32* %ptr) {
; ARM: test2
; ARM: ldr
-; ARM-NEXT: dmb ish
+; ARM-NEXT: dmb {{ish$}}
; THUMBONE: test2
; THUMBONE: __sync_val_compare_and_swap_4
; THUMBTWO: test2
; THUMBTWO: ldr
-; THUMBTWO-NEXT: dmb ish
+; THUMBTWO-NEXT: dmb {{ish$}}
%val = load atomic i32* %ptr seq_cst, align 4
ret i32 %val
}
@@ -69,3 +69,37 @@ define void @test_old_store_64bit(i64* %p, i64 %v) {
store atomic i64 %v, i64* %p seq_cst, align 8
ret void
}
+
+; Release operations only need the store barrier provided by a "dmb ishst".
+
+define void @test_store_release(i32* %p, i32 %v) {
+; ARM: test_store_release:
+; ARM: dmb ishst
+; THUMBTWO: test_store_release:
+; THUMBTWO: dmb ishst
+
+ store atomic i32 %v, i32* %p release, align 4
+ ret void
+}
+
+; However, if sequential consistency is needed, *something* must ensure a release
+; followed by an acquire does not get reordered. In that case a "dmb ishst" is
+; not adequate.
+define i32 @test_seq_cst(i32* %p, i32 %v) {
+; ARM: test_seq_cst:
+; ARM: dmb ishst
+; ARM: str
+; ARM: dmb {{ish$}}
+; ARM: ldr
+; ARM: dmb {{ish$}}
+
+; THUMBTWO: test_seq_cst:
+; THUMBTWO: dmb ishst
+; THUMBTWO: str
+; THUMBTWO: dmb {{ish$}}
+; THUMBTWO: ldr
+; THUMBTWO: dmb {{ish$}}
+ store atomic i32 %v, i32* %p seq_cst, align 4
+ %val = load atomic i32* %p seq_cst, align 4
+ ret i32 %val
+}
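The comment in test_seq_cst can be made concrete with the classic
store-buffering litmus test. The sketch below is not part of the commit;
@x and @y are assumed distinct shared globals, with @thread1 and @thread2
run concurrently on separate threads:

    @x = global i32 0
    @y = global i32 0

    define i32 @thread1() {
      ; seq_cst store to @x, then seq_cst load from @y
      store atomic i32 1, i32* @x seq_cst, align 4
      %r1 = load atomic i32* @y seq_cst, align 4
      ret i32 %r1
    }

    define i32 @thread2() {
      ; mirror image: seq_cst store to @y, then seq_cst load from @x
      store atomic i32 1, i32* @y seq_cst, align 4
      %r2 = load atomic i32* @x seq_cst, align 4
      ret i32 %r2
    }

Sequential consistency forbids both threads returning 0. On ARMv7 that
outcome is ruled out only by the full "dmb ish" emitted between each str and
the following ldr; a "dmb ishst" in that position would still permit the
store and the later load to be reordered, making the forbidden result
observable. This is exactly why the trailing barriers in test_seq_cst are
checked with {{ish$}} rather than ishst.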