author     Hans Wennborg <hans@hanshq.net>    2018-08-09 12:37:40 +0000
committer  Hans Wennborg <hans@hanshq.net>    2018-08-09 12:37:40 +0000
commit     f7ec630085d9746dc26713dfb7c2c0fc8a691251 (patch)
tree       43bbad721c0e98c8a7cd8d87e16f43b6d082c5ac /test
parent     ef7dd703b18efb1109df25124f91d05444358790 (diff)
Merging r339316:
------------------------------------------------------------------------
r339316 | hahnfeld | 2018-08-09 09:45:49 +0200 (Thu, 09 Aug 2018) | 16 lines

[NVPTX] Select atomic loads and stores

According to PTX ISA .volatile has the same memory synchronization
semantics as .relaxed.sys, so it can be used to implement monotonic
atomic loads and stores. This is important for OpenMP's atomic
construct where
 - 'read's and 'write's are lowered to atomic loads and stores, and
 - an update of float or double types is lowered into a cmpxchg loop.
(Note that PTX could do better because it has atom.add.f{32,64} but
LLVM's atomicrmw instruction only allows integer types.)

Higher levels of atomicity (like acquire and release) need additional
synchronization properties which were added with PTX ISA 6.0 / sm_70.
So using these instructions still results in an error.

Differential Revision: https://reviews.llvm.org/D50391
------------------------------------------------------------------------

git-svn-id: https://llvm.org/svn/llvm-project/llvm/branches/release_70@339338 91177308-0d34-0410-b5e6-96231b3b80d8
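For context, the cmpxchg loop mentioned in the commit message can be illustrated with a short, hypothetical sketch. This is not part of the patch or of the test below; it is one way a frontend might express a monotonic floating-point update when atomicrmw accepts only integer types, with all names chosen for illustration.

; Hypothetical sketch, not part of this patch: a monotonic float update
; expressed as a cmpxchg loop on the value's integer bits.
define void @float_update_sketch(float* %p, float %v) {
entry:
  %p.i = bitcast float* %p to i32*
  ; read the current value with monotonic ordering
  %old.init = load atomic i32, i32* %p.i monotonic, align 4
  br label %loop

loop:
  %old = phi i32 [ %old.init, %entry ], [ %cur, %loop ]
  ; reinterpret the bits as float, add, and reinterpret back
  %old.f = bitcast i32 %old to float
  %new.f = fadd float %old.f, %v
  %new = bitcast float %new.f to i32
  ; try to publish the new value; retry if another thread raced ahead
  %pair = cmpxchg i32* %p.i, i32 %old, i32 %new monotonic monotonic
  %cur = extractvalue { i32, i1 } %pair, 0
  %ok = extractvalue { i32, i1 } %pair, 1
  br i1 %ok, label %done, label %loop

done:
  ret void
}

The monotonic atomic load in this sketch is the kind of access whose selection to ld.volatile/st.volatile the test below checks.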
Diffstat (limited to 'test')
-rw-r--r--  test/CodeGen/NVPTX/load-store.ll | 88
1 file changed, 88 insertions(+), 0 deletions(-)
diff --git a/test/CodeGen/NVPTX/load-store.ll b/test/CodeGen/NVPTX/load-store.ll
new file mode 100644
index 00000000000..03b0109dea2
--- /dev/null
+++ b/test/CodeGen/NVPTX/load-store.ll
@@ -0,0 +1,88 @@
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s
+
+; CHECK-LABEL: plain
+define void @plain(i8* %a, i16* %b, i32* %c, i64* %d) local_unnamed_addr {
+ ; CHECK: ld.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load i8, i8* %a
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store i8 %a.add, i8* %a
+
+ ; CHECK: ld.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load i16, i16* %b
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store i16 %b.add, i16* %b
+
+ ; CHECK: ld.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load i32, i32* %c
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store i32 %c.add, i32* %c
+
+ ; CHECK: ld.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load i64, i64* %d
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store i64 %d.add, i64* %d
+
+ ret void
+}
+
+; CHECK-LABEL: volatile
+define void @volatile(i8* %a, i16* %b, i32* %c, i64* %d) local_unnamed_addr {
+ ; CHECK: ld.volatile.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load volatile i8, i8* %a
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.volatile.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store volatile i8 %a.add, i8* %a
+
+ ; CHECK: ld.volatile.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load volatile i16, i16* %b
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.volatile.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store volatile i16 %b.add, i16* %b
+
+ ; CHECK: ld.volatile.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load volatile i32, i32* %c
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.volatile.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store volatile i32 %c.add, i32* %c
+
+ ; CHECK: ld.volatile.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load volatile i64, i64* %d
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.volatile.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store volatile i64 %d.add, i64* %d
+
+ ret void
+}
+
+; CHECK-LABEL: monotonic
+define void @monotonic(i8* %a, i16* %b, i32* %c, i64* %d) local_unnamed_addr {
+ ; CHECK: ld.volatile.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, i8* %a monotonic, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.volatile.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, i8* %a monotonic, align 1
+
+ ; CHECK: ld.volatile.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, i16* %b monotonic, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.volatile.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, i16* %b monotonic, align 2
+
+ ; CHECK: ld.volatile.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, i32* %c monotonic, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.volatile.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, i32* %c monotonic, align 4
+
+ ; CHECK: ld.volatile.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, i64* %d monotonic, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.volatile.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, i64* %d monotonic, align 8
+
+ ret void
+}
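
; As noted in the commit message, orderings stronger than monotonic need
; PTX ISA 6.0 / sm_70, so the test deliberately stops at monotonic. A
; hypothetical function like the following (not part of this test) would
; be expected to fail selection on sm_20 rather than produce PTX:
;
;   define i32 @acquire_unsupported(i32* %p) {
;     %v = load atomic i32, i32* %p acquire, align 4
;     ret i32 %v
;   }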