author     Konstantin Zhuravlyov <kzhuravl_dev@outlook.com>  2017-07-11 22:23:00 +0000
committer  Konstantin Zhuravlyov <kzhuravl_dev@outlook.com>  2017-07-11 22:23:00 +0000
commit     8f85685860c0f6018a83b804f33e47f8122a9eba
tree       70998f02d9f575729c633331fa2b8d3fcfaf3c33 /test/Bitcode
parent     fdda7ea9d5f2dea87392cde2577c6ea6fd142433
Enhance synchscope representation
OpenCL 2.0 introduces the notion of memory scopes in atomic operations to global and local memory. These scopes restrict how synchronization is achieved, which can result in improved performance.

This change extends the existing notion of synchronization scopes in LLVM to support arbitrary scopes expressed as target-specific strings, in addition to the already defined scopes (single thread, system). The LLVM IR and MIR syntax for expressing synchronization scopes has changed to use *syncscope("<scope>")*, where <scope> can be "singlethread" (this replaces the *singlethread* keyword) or a target-specific name. As before, if the scope is not specified, it defaults to the CrossThread/System scope.

Implementation details:
- The mapping from synchronization scope name/string to synchronization scope id is stored in the LLVM context;
- The CrossThread/System and SingleThread scopes are pre-defined to efficiently check for known scopes without comparing strings;
- Synchronization scope names are stored in the SYNC_SCOPE_NAMES_BLOCK in the bitcode.

Differential Revision: https://reviews.llvm.org/D21723

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@307722 91177308-0d34-0410-b5e6-96231b3b80d8
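To illustrate the syntax change described above, here is a minimal LLVM IR sketch; the pointer %p and the target-specific scope name "agent" are hypothetical examples, not taken from this patch:

; Old syntax (before this change):
;   fence singlethread seq_cst
;   store atomic i32 1, i32* %p singlethread monotonic, align 4
; New syntax (after this change):
fence syncscope("singlethread") seq_cst
store atomic i32 1, i32* %p syncscope("singlethread") monotonic, align 4
; A target may also define its own scope names, e.g. a hypothetical "agent" scope:
fence syncscope("agent") seq_cst
; If no syncscope(...) is given, the operation defaults to the CrossThread/System scope:
fence seq_cst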
Diffstat (limited to 'test/Bitcode')
-rw-r--r--  test/Bitcode/atomic-no-syncscope.ll      17
-rw-r--r--  test/Bitcode/atomic-no-syncscope.ll.bc   bin 0 -> 1000 bytes
-rw-r--r--  test/Bitcode/atomic.ll                    4
-rw-r--r--  test/Bitcode/compatibility-3.6.ll        24
-rw-r--r--  test/Bitcode/compatibility-3.7.ll        24
-rw-r--r--  test/Bitcode/compatibility-3.8.ll        24
-rw-r--r--  test/Bitcode/compatibility-3.9.ll        24
-rw-r--r--  test/Bitcode/compatibility-4.0.ll        24
-rw-r--r--  test/Bitcode/compatibility.ll            24
-rw-r--r--  test/Bitcode/memInstructions.3.2.ll     104
10 files changed, 143 insertions(+), 126 deletions(-)
diff --git a/test/Bitcode/atomic-no-syncscope.ll b/test/Bitcode/atomic-no-syncscope.ll
new file mode 100644
index 00000000000..a57507bc814
--- /dev/null
+++ b/test/Bitcode/atomic-no-syncscope.ll
@@ -0,0 +1,17 @@
+; RUN: llvm-dis -o - %s.bc | FileCheck %s
+
+; Backwards compatibility test: make sure we can process bitcode without
+; synchronization scope names encoded in it.
+
+; CHECK: load atomic i32, i32* %x unordered, align 4
+; CHECK: load atomic volatile i32, i32* %x syncscope("singlethread") acquire, align 4
+; CHECK: store atomic i32 3, i32* %x release, align 4
+; CHECK: store atomic volatile i32 3, i32* %x syncscope("singlethread") monotonic, align 4
+; CHECK: cmpxchg i32* %x, i32 1, i32 0 syncscope("singlethread") monotonic monotonic
+; CHECK: cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel acquire
+; CHECK: cmpxchg i32* %x, i32 42, i32 0 acq_rel monotonic
+; CHECK: cmpxchg weak i32* %x, i32 13, i32 0 seq_cst monotonic
+; CHECK: atomicrmw add i32* %x, i32 10 seq_cst
+; CHECK: atomicrmw volatile xchg i32* %x, i32 10 monotonic
+; CHECK: fence syncscope("singlethread") release
+; CHECK: fence seq_cst
diff --git a/test/Bitcode/atomic-no-syncscope.ll.bc b/test/Bitcode/atomic-no-syncscope.ll.bc
new file mode 100644
index 00000000000..01d565eb442
--- /dev/null
+++ b/test/Bitcode/atomic-no-syncscope.ll.bc
Binary files differ
diff --git a/test/Bitcode/atomic.ll b/test/Bitcode/atomic.ll
index c09e74c1c2f..bef3f271293 100644
--- a/test/Bitcode/atomic.ll
+++ b/test/Bitcode/atomic.ll
@@ -11,8 +11,8 @@ define void @test_cmpxchg(i32* %addr, i32 %desired, i32 %new) {
cmpxchg weak i32* %addr, i32 %desired, i32 %new acq_rel acquire
; CHECK: cmpxchg weak i32* %addr, i32 %desired, i32 %new acq_rel acquire
- cmpxchg weak volatile i32* %addr, i32 %desired, i32 %new singlethread release monotonic
- ; CHECK: cmpxchg weak volatile i32* %addr, i32 %desired, i32 %new singlethread release monotonic
+ cmpxchg weak volatile i32* %addr, i32 %desired, i32 %new syncscope("singlethread") release monotonic
+ ; CHECK: cmpxchg weak volatile i32* %addr, i32 %desired, i32 %new syncscope("singlethread") release monotonic
ret void
}
diff --git a/test/Bitcode/compatibility-3.6.ll b/test/Bitcode/compatibility-3.6.ll
index 8d51ee11a20..cf6c30e7c26 100644
--- a/test/Bitcode/compatibility-3.6.ll
+++ b/test/Bitcode/compatibility-3.6.ll
@@ -551,8 +551,8 @@ define void @atomics(i32* %word) {
; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
%cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
- %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic
- ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic
+ %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
+ ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
%atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
%atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
@@ -571,33 +571,33 @@ define void @atomics(i32* %word) {
; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
%atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
- %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic
- ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic
- %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic
- ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic
+ %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
fence acquire
; CHECK: fence acquire
fence release
; CHECK: fence release
fence acq_rel
; CHECK: fence acq_rel
- fence singlethread seq_cst
- ; CHECK: fence singlethread seq_cst
+ fence syncscope("singlethread") seq_cst
+ ; CHECK: fence syncscope("singlethread") seq_cst
; XXX: The parser spits out the load type here.
%ld.1 = load atomic i32* %word monotonic, align 4
; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
%ld.2 = load atomic volatile i32* %word acquire, align 8
; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
- %ld.3 = load atomic volatile i32* %word singlethread seq_cst, align 16
- ; CHECK: %ld.3 = load atomic volatile i32, i32* %word singlethread seq_cst, align 16
+ %ld.3 = load atomic volatile i32* %word syncscope("singlethread") seq_cst, align 16
+ ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
store atomic i32 23, i32* %word monotonic, align 4
; CHECK: store atomic i32 23, i32* %word monotonic, align 4
store atomic volatile i32 24, i32* %word monotonic, align 4
; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
- store atomic volatile i32 25, i32* %word singlethread monotonic, align 4
- ; CHECK: store atomic volatile i32 25, i32* %word singlethread monotonic, align 4
+ store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+ ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
ret void
}
diff --git a/test/Bitcode/compatibility-3.7.ll b/test/Bitcode/compatibility-3.7.ll
index ebdf4c30587..180dad258b6 100644
--- a/test/Bitcode/compatibility-3.7.ll
+++ b/test/Bitcode/compatibility-3.7.ll
@@ -596,8 +596,8 @@ define void @atomics(i32* %word) {
; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
%cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
- %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic
- ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic
+ %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
+ ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
%atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
%atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
@@ -616,32 +616,32 @@ define void @atomics(i32* %word) {
; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
%atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
- %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic
- ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic
- %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic
- ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic
+ %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
fence acquire
; CHECK: fence acquire
fence release
; CHECK: fence release
fence acq_rel
; CHECK: fence acq_rel
- fence singlethread seq_cst
- ; CHECK: fence singlethread seq_cst
+ fence syncscope("singlethread") seq_cst
+ ; CHECK: fence syncscope("singlethread") seq_cst
%ld.1 = load atomic i32, i32* %word monotonic, align 4
; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
%ld.2 = load atomic volatile i32, i32* %word acquire, align 8
; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
- %ld.3 = load atomic volatile i32, i32* %word singlethread seq_cst, align 16
- ; CHECK: %ld.3 = load atomic volatile i32, i32* %word singlethread seq_cst, align 16
+ %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
+ ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
store atomic i32 23, i32* %word monotonic, align 4
; CHECK: store atomic i32 23, i32* %word monotonic, align 4
store atomic volatile i32 24, i32* %word monotonic, align 4
; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
- store atomic volatile i32 25, i32* %word singlethread monotonic, align 4
- ; CHECK: store atomic volatile i32 25, i32* %word singlethread monotonic, align 4
+ store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+ ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
ret void
}
diff --git a/test/Bitcode/compatibility-3.8.ll b/test/Bitcode/compatibility-3.8.ll
index 57ea3e06837..370c7f51a2b 100644
--- a/test/Bitcode/compatibility-3.8.ll
+++ b/test/Bitcode/compatibility-3.8.ll
@@ -627,8 +627,8 @@ define void @atomics(i32* %word) {
; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
%cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
- %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic
- ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic
+ %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
+ ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
%atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
%atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
@@ -647,32 +647,32 @@ define void @atomics(i32* %word) {
; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
%atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
- %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic
- ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic
- %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic
- ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic
+ %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
fence acquire
; CHECK: fence acquire
fence release
; CHECK: fence release
fence acq_rel
; CHECK: fence acq_rel
- fence singlethread seq_cst
- ; CHECK: fence singlethread seq_cst
+ fence syncscope("singlethread") seq_cst
+ ; CHECK: fence syncscope("singlethread") seq_cst
%ld.1 = load atomic i32, i32* %word monotonic, align 4
; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
%ld.2 = load atomic volatile i32, i32* %word acquire, align 8
; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
- %ld.3 = load atomic volatile i32, i32* %word singlethread seq_cst, align 16
- ; CHECK: %ld.3 = load atomic volatile i32, i32* %word singlethread seq_cst, align 16
+ %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
+ ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
store atomic i32 23, i32* %word monotonic, align 4
; CHECK: store atomic i32 23, i32* %word monotonic, align 4
store atomic volatile i32 24, i32* %word monotonic, align 4
; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
- store atomic volatile i32 25, i32* %word singlethread monotonic, align 4
- ; CHECK: store atomic volatile i32 25, i32* %word singlethread monotonic, align 4
+ store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+ ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
ret void
}
diff --git a/test/Bitcode/compatibility-3.9.ll b/test/Bitcode/compatibility-3.9.ll
index 2a6cfe14cdb..4115cbd8fe6 100644
--- a/test/Bitcode/compatibility-3.9.ll
+++ b/test/Bitcode/compatibility-3.9.ll
@@ -698,8 +698,8 @@ define void @atomics(i32* %word) {
; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
%cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
- %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic
- ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic
+ %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
+ ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
%atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
%atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
@@ -718,32 +718,32 @@ define void @atomics(i32* %word) {
; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
%atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
- %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic
- ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic
- %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic
- ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic
+ %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
fence acquire
; CHECK: fence acquire
fence release
; CHECK: fence release
fence acq_rel
; CHECK: fence acq_rel
- fence singlethread seq_cst
- ; CHECK: fence singlethread seq_cst
+ fence syncscope("singlethread") seq_cst
+ ; CHECK: fence syncscope("singlethread") seq_cst
%ld.1 = load atomic i32, i32* %word monotonic, align 4
; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
%ld.2 = load atomic volatile i32, i32* %word acquire, align 8
; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
- %ld.3 = load atomic volatile i32, i32* %word singlethread seq_cst, align 16
- ; CHECK: %ld.3 = load atomic volatile i32, i32* %word singlethread seq_cst, align 16
+ %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
+ ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
store atomic i32 23, i32* %word monotonic, align 4
; CHECK: store atomic i32 23, i32* %word monotonic, align 4
store atomic volatile i32 24, i32* %word monotonic, align 4
; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
- store atomic volatile i32 25, i32* %word singlethread monotonic, align 4
- ; CHECK: store atomic volatile i32 25, i32* %word singlethread monotonic, align 4
+ store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+ ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
ret void
}
diff --git a/test/Bitcode/compatibility-4.0.ll b/test/Bitcode/compatibility-4.0.ll
index c83c107a292..eef925564ec 100644
--- a/test/Bitcode/compatibility-4.0.ll
+++ b/test/Bitcode/compatibility-4.0.ll
@@ -698,8 +698,8 @@ define void @atomics(i32* %word) {
; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
%cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
- %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic
- ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic
+ %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
+ ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
%atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
%atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
@@ -718,32 +718,32 @@ define void @atomics(i32* %word) {
; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
%atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
- %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic
- ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic
- %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic
- ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic
+ %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
fence acquire
; CHECK: fence acquire
fence release
; CHECK: fence release
fence acq_rel
; CHECK: fence acq_rel
- fence singlethread seq_cst
- ; CHECK: fence singlethread seq_cst
+ fence syncscope("singlethread") seq_cst
+ ; CHECK: fence syncscope("singlethread") seq_cst
%ld.1 = load atomic i32, i32* %word monotonic, align 4
; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
%ld.2 = load atomic volatile i32, i32* %word acquire, align 8
; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
- %ld.3 = load atomic volatile i32, i32* %word singlethread seq_cst, align 16
- ; CHECK: %ld.3 = load atomic volatile i32, i32* %word singlethread seq_cst, align 16
+ %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
+ ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
store atomic i32 23, i32* %word monotonic, align 4
; CHECK: store atomic i32 23, i32* %word monotonic, align 4
store atomic volatile i32 24, i32* %word monotonic, align 4
; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
- store atomic volatile i32 25, i32* %word singlethread monotonic, align 4
- ; CHECK: store atomic volatile i32 25, i32* %word singlethread monotonic, align 4
+ store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+ ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
ret void
}
diff --git a/test/Bitcode/compatibility.ll b/test/Bitcode/compatibility.ll
index ec69344947c..ebd727ba9ae 100644
--- a/test/Bitcode/compatibility.ll
+++ b/test/Bitcode/compatibility.ll
@@ -705,8 +705,8 @@ define void @atomics(i32* %word) {
; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
%cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
- %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic
- ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic
+ %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
+ ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
%atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
%atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
@@ -725,32 +725,32 @@ define void @atomics(i32* %word) {
; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
%atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
- %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic
- ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic
- %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic
- ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic
+ %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
fence acquire
; CHECK: fence acquire
fence release
; CHECK: fence release
fence acq_rel
; CHECK: fence acq_rel
- fence singlethread seq_cst
- ; CHECK: fence singlethread seq_cst
+ fence syncscope("singlethread") seq_cst
+ ; CHECK: fence syncscope("singlethread") seq_cst
%ld.1 = load atomic i32, i32* %word monotonic, align 4
; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
%ld.2 = load atomic volatile i32, i32* %word acquire, align 8
; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
- %ld.3 = load atomic volatile i32, i32* %word singlethread seq_cst, align 16
- ; CHECK: %ld.3 = load atomic volatile i32, i32* %word singlethread seq_cst, align 16
+ %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
+ ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
store atomic i32 23, i32* %word monotonic, align 4
; CHECK: store atomic i32 23, i32* %word monotonic, align 4
store atomic volatile i32 24, i32* %word monotonic, align 4
; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
- store atomic volatile i32 25, i32* %word singlethread monotonic, align 4
- ; CHECK: store atomic volatile i32 25, i32* %word singlethread monotonic, align 4
+ store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+ ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
ret void
}
diff --git a/test/Bitcode/memInstructions.3.2.ll b/test/Bitcode/memInstructions.3.2.ll
index 1ab05b6d1b4..c530b6d2cb1 100644
--- a/test/Bitcode/memInstructions.3.2.ll
+++ b/test/Bitcode/memInstructions.3.2.ll
@@ -107,29 +107,29 @@ entry:
; CHECK-NEXT: %res8 = load atomic volatile i8, i8* %ptr1 seq_cst, align 1
%res8 = load atomic volatile i8, i8* %ptr1 seq_cst, align 1
-; CHECK-NEXT: %res9 = load atomic i8, i8* %ptr1 singlethread unordered, align 1
- %res9 = load atomic i8, i8* %ptr1 singlethread unordered, align 1
+; CHECK-NEXT: %res9 = load atomic i8, i8* %ptr1 syncscope("singlethread") unordered, align 1
+ %res9 = load atomic i8, i8* %ptr1 syncscope("singlethread") unordered, align 1
-; CHECK-NEXT: %res10 = load atomic i8, i8* %ptr1 singlethread monotonic, align 1
- %res10 = load atomic i8, i8* %ptr1 singlethread monotonic, align 1
+; CHECK-NEXT: %res10 = load atomic i8, i8* %ptr1 syncscope("singlethread") monotonic, align 1
+ %res10 = load atomic i8, i8* %ptr1 syncscope("singlethread") monotonic, align 1
-; CHECK-NEXT: %res11 = load atomic i8, i8* %ptr1 singlethread acquire, align 1
- %res11 = load atomic i8, i8* %ptr1 singlethread acquire, align 1
+; CHECK-NEXT: %res11 = load atomic i8, i8* %ptr1 syncscope("singlethread") acquire, align 1
+ %res11 = load atomic i8, i8* %ptr1 syncscope("singlethread") acquire, align 1
-; CHECK-NEXT: %res12 = load atomic i8, i8* %ptr1 singlethread seq_cst, align 1
- %res12 = load atomic i8, i8* %ptr1 singlethread seq_cst, align 1
+; CHECK-NEXT: %res12 = load atomic i8, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
+ %res12 = load atomic i8, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
-; CHECK-NEXT: %res13 = load atomic volatile i8, i8* %ptr1 singlethread unordered, align 1
- %res13 = load atomic volatile i8, i8* %ptr1 singlethread unordered, align 1
+; CHECK-NEXT: %res13 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") unordered, align 1
+ %res13 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") unordered, align 1
-; CHECK-NEXT: %res14 = load atomic volatile i8, i8* %ptr1 singlethread monotonic, align 1
- %res14 = load atomic volatile i8, i8* %ptr1 singlethread monotonic, align 1
+; CHECK-NEXT: %res14 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") monotonic, align 1
+ %res14 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") monotonic, align 1
-; CHECK-NEXT: %res15 = load atomic volatile i8, i8* %ptr1 singlethread acquire, align 1
- %res15 = load atomic volatile i8, i8* %ptr1 singlethread acquire, align 1
+; CHECK-NEXT: %res15 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") acquire, align 1
+ %res15 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") acquire, align 1
-; CHECK-NEXT: %res16 = load atomic volatile i8, i8* %ptr1 singlethread seq_cst, align 1
- %res16 = load atomic volatile i8, i8* %ptr1 singlethread seq_cst, align 1
+; CHECK-NEXT: %res16 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
+ %res16 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
ret void
}
@@ -193,29 +193,29 @@ entry:
; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 seq_cst, align 1
store atomic volatile i8 2, i8* %ptr1 seq_cst, align 1
-; CHECK-NEXT: store atomic i8 2, i8* %ptr1 singlethread unordered, align 1
- store atomic i8 2, i8* %ptr1 singlethread unordered, align 1
+; CHECK-NEXT: store atomic i8 2, i8* %ptr1 syncscope("singlethread") unordered, align 1
+ store atomic i8 2, i8* %ptr1 syncscope("singlethread") unordered, align 1
-; CHECK-NEXT: store atomic i8 2, i8* %ptr1 singlethread monotonic, align 1
- store atomic i8 2, i8* %ptr1 singlethread monotonic, align 1
+; CHECK-NEXT: store atomic i8 2, i8* %ptr1 syncscope("singlethread") monotonic, align 1
+ store atomic i8 2, i8* %ptr1 syncscope("singlethread") monotonic, align 1
-; CHECK-NEXT: store atomic i8 2, i8* %ptr1 singlethread release, align 1
- store atomic i8 2, i8* %ptr1 singlethread release, align 1
+; CHECK-NEXT: store atomic i8 2, i8* %ptr1 syncscope("singlethread") release, align 1
+ store atomic i8 2, i8* %ptr1 syncscope("singlethread") release, align 1
-; CHECK-NEXT: store atomic i8 2, i8* %ptr1 singlethread seq_cst, align 1
- store atomic i8 2, i8* %ptr1 singlethread seq_cst, align 1
+; CHECK-NEXT: store atomic i8 2, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
+ store atomic i8 2, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
-; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 singlethread unordered, align 1
- store atomic volatile i8 2, i8* %ptr1 singlethread unordered, align 1
+; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") unordered, align 1
+ store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") unordered, align 1
-; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 singlethread monotonic, align 1
- store atomic volatile i8 2, i8* %ptr1 singlethread monotonic, align 1
+; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") monotonic, align 1
+ store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") monotonic, align 1
-; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 singlethread release, align 1
- store atomic volatile i8 2, i8* %ptr1 singlethread release, align 1
+; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") release, align 1
+ store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") release, align 1
-; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 singlethread seq_cst, align 1
- store atomic volatile i8 2, i8* %ptr1 singlethread seq_cst, align 1
+; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
+ store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
ret void
}
@@ -232,13 +232,13 @@ entry:
; CHECK-NEXT: %res2 = extractvalue { i32, i1 } [[TMP]], 0
%res2 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic
; CHECK-NEXT: %res3 = extractvalue { i32, i1 } [[TMP]], 0
- %res3 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
+ %res3 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic
; CHECK-NEXT: %res4 = extractvalue { i32, i1 } [[TMP]], 0
- %res4 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
+ %res4 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic
; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new acquire acquire
@@ -249,13 +249,13 @@ entry:
; CHECK-NEXT: %res6 = extractvalue { i32, i1 } [[TMP]], 0
%res6 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acquire acquire
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acquire acquire
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acquire acquire
; CHECK-NEXT: %res7 = extractvalue { i32, i1 } [[TMP]], 0
- %res7 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acquire acquire
+ %res7 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acquire acquire
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acquire acquire
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acquire acquire
; CHECK-NEXT: %res8 = extractvalue { i32, i1 } [[TMP]], 0
- %res8 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acquire acquire
+ %res8 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acquire acquire
; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new release monotonic
@@ -266,13 +266,13 @@ entry:
; CHECK-NEXT: %res10 = extractvalue { i32, i1 } [[TMP]], 0
%res10 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new release monotonic
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread release monotonic
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") release monotonic
; CHECK-NEXT: %res11 = extractvalue { i32, i1 } [[TMP]], 0
- %res11 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread release monotonic
+ %res11 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") release monotonic
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread release monotonic
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") release monotonic
; CHECK-NEXT: %res12 = extractvalue { i32, i1 } [[TMP]], 0
- %res12 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread release monotonic
+ %res12 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") release monotonic
; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
@@ -283,13 +283,13 @@ entry:
; CHECK-NEXT: %res14 = extractvalue { i32, i1 } [[TMP]], 0
%res14 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire
; CHECK-NEXT: %res15 = extractvalue { i32, i1 } [[TMP]], 0
- %res15 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
+ %res15 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire
; CHECK-NEXT: %res16 = extractvalue { i32, i1 } [[TMP]], 0
- %res16 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
+ %res16 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire
; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
@@ -300,13 +300,13 @@ entry:
; CHECK-NEXT: %res18 = extractvalue { i32, i1 } [[TMP]], 0
%res18 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst seq_cst
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") seq_cst seq_cst
; CHECK-NEXT: %res19 = extractvalue { i32, i1 } [[TMP]], 0
- %res19 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst seq_cst
+ %res19 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") seq_cst seq_cst
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst seq_cst
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") seq_cst seq_cst
; CHECK-NEXT: %res20 = extractvalue { i32, i1 } [[TMP]], 0
- %res20 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst seq_cst
+ %res20 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") seq_cst seq_cst
ret void
}