author    Derek Schuff <dschuff@google.com>  2017-10-05 21:18:42 +0000
committer Derek Schuff <dschuff@google.com>  2017-10-05 21:18:42 +0000
commit    365cb32dfede399fb95996ba7acb684d1e10d738 (patch)
tree      b0d6b36310eb3d1ed03a5be453ed09752229f58e /test/CodeGen/WebAssembly
parent    b1eb7bb6ec1a90b6b7f774098e309a93beda2bd4 (diff)
[WebAssembly] Add the rest of the atomic loads
Add extending loads and constant offset patterns. A bit more refactoring of the tablegen to make the patterns fairly nice and uniform between the regular and atomic loads.

Differential Revision: https://reviews.llvm.org/D38523

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@315022 91177308-0d34-0410-b5e6-96231b3b80d8
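As a rough illustration of what the new patterns cover (a sketch only; the function name and offset value below are made up for this summary, and the real cases are in the tests added by this change), an extending atomic load whose address is an inbounds gep with a small constant offset should now select a single wasm atomic load with the offset folded into the immediate:

    ; Illustrative IR shape; see load-ext-atomic.ll and offset-atomics.ll below
    ; for the actual test cases.
    define i32 @zext_i8_i32_offset(i8* %p) {
      %s = getelementptr inbounds i8, i8* %p, i32 16  ; constant, foldable offset
      %v = load atomic i8, i8* %s seq_cst, align 1    ; extending atomic load
      %e = zext i8 %v to i32
      ret i32 %e
    }
    ; Expected selection, roughly: i32.atomic.load8_u $push0=, 16($0)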
Diffstat (limited to 'test/CodeGen/WebAssembly')
-rw-r--r--  test/CodeGen/WebAssembly/atomics.ll                    16
-rw-r--r--  test/CodeGen/WebAssembly/i32-load-store-alignment.ll   28
-rw-r--r--  test/CodeGen/WebAssembly/i64-load-store-alignment.ll   25
-rw-r--r--  test/CodeGen/WebAssembly/load-ext-atomic.ll           102
-rw-r--r--  test/CodeGen/WebAssembly/offset-atomics.ll            307
5 files changed, 460 insertions, 18 deletions
diff --git a/test/CodeGen/WebAssembly/atomics.ll b/test/CodeGen/WebAssembly/atomics.ll
deleted file mode 100644
index 80b8b8c793c..00000000000
--- a/test/CodeGen/WebAssembly/atomics.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: not llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals -mattr=+atomics | FileCheck %s
-
-; Test that atomic loads are assembled properly.
-
-target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown-wasm"
-
-; CHECK-LABEL: load_i32_atomic:
-; CHECK: i32.atomic.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
-; CHECK-NEXT: return $pop[[NUM]]{{$}}
-
-define i32 @load_i32_atomic(i32 *%p) {
- %v = load atomic i32, i32* %p seq_cst, align 4
- ret i32 %v
-}
diff --git a/test/CodeGen/WebAssembly/i32-load-store-alignment.ll b/test/CodeGen/WebAssembly/i32-load-store-alignment.ll
index 661d1b7bfc3..1296632cca3 100644
--- a/test/CodeGen/WebAssembly/i32-load-store-alignment.ll
+++ b/test/CodeGen/WebAssembly/i32-load-store-alignment.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
+; RUN: llc < %s -mattr=+atomics -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
; Test loads and stores with custom alignment values.
@@ -210,3 +210,29 @@ define void @sti16_a4(i16 *%p, i16 %v) {
store i16 %v, i16* %p, align 4
ret void
}
+
+; Atomics.
+; Wasm atomics have the alignment field, but it must always have the
+; type's natural alignment.
+
+; CHECK-LABEL: ldi32_atomic_a4:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i32{{$}}
+; CHECK-NEXT: i32.atomic.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i32 @ldi32_atomic_a4(i32 *%p) {
+ %v = load atomic i32, i32* %p seq_cst, align 4
+ ret i32 %v
+}
+
+; 8 is greater than the natural alignment, so it is rounded down to 4.
+
+; CHECK-LABEL: ldi32_atomic_a8:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i32{{$}}
+; CHECK-NEXT: i32.atomic.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i32 @ldi32_atomic_a8(i32 *%p) {
+ %v = load atomic i32, i32* %p seq_cst, align 8
+ ret i32 %v
+}
diff --git a/test/CodeGen/WebAssembly/i64-load-store-alignment.ll b/test/CodeGen/WebAssembly/i64-load-store-alignment.ll
index 1ccb74cb9d2..757f785cfd6 100644
--- a/test/CodeGen/WebAssembly/i64-load-store-alignment.ll
+++ b/test/CodeGen/WebAssembly/i64-load-store-alignment.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
+; RUN: llc < %s -mattr=+atomics -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
; Test loads and stores with custom alignment values.
@@ -323,3 +323,26 @@ define void @sti32_a8(i32 *%p, i64 %w) {
store i32 %v, i32* %p, align 8
ret void
}
+
+; Atomics.
+; CHECK-LABEL: ldi64_atomic_a8:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i64{{$}}
+; CHECK-NEXT: i64.atomic.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i64 @ldi64_atomic_a8(i64 *%p) {
+ %v = load atomic i64, i64* %p seq_cst, align 8
+ ret i64 %v
+}
+
+; 16 is greater than the natural alignment, so it is ignored.
+
+; CHECK-LABEL: ldi64_atomic_a16:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i64{{$}}
+; CHECK-NEXT: i64.atomic.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i64 @ldi64_atomic_a16(i64 *%p) {
+ %v = load atomic i64, i64* %p seq_cst, align 16
+ ret i64 %v
+}
diff --git a/test/CodeGen/WebAssembly/load-ext-atomic.ll b/test/CodeGen/WebAssembly/load-ext-atomic.ll
new file mode 100644
index 00000000000..0c4552dc9af
--- /dev/null
+++ b/test/CodeGen/WebAssembly/load-ext-atomic.ll
@@ -0,0 +1,102 @@
+; RUN: llc < %s -mattr=+atomics -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
+
+; Test that extending loads are assembled properly.
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown-wasm"
+
+; CHECK-LABEL: sext_i8_i32:
+; CHECK: i32.atomic.load8_u $push0=, 0($0){{$}}
+; CHECK-NEXT: i32.extend8_s $push1=, $pop0{{$}}
+; CHECK-NEXT: return $pop1{{$}}
+define i32 @sext_i8_i32(i8 *%p) {
+ %v = load atomic i8, i8* %p seq_cst, align 1
+ %e = sext i8 %v to i32
+ ret i32 %e
+}
+
+; CHECK-LABEL: zext_i8_i32:
+; CHECK: i32.atomic.load8_u $push0=, 0($0){{$}}
+; CHECK-NEXT: return $pop0{{$}}
+define i32 @zext_i8_i32(i8 *%p) {
+e1:
+ %v = load atomic i8, i8* %p seq_cst, align 1
+ %e = zext i8 %v to i32
+ ret i32 %e
+}
+
+; CHECK-LABEL: sext_i16_i32:
+; CHECK: i32.atomic.load16_u $push0=, 0($0){{$}}
+; CHECK-NEXT: i32.extend16_s $push1=, $pop0{{$}}
+; CHECK-NEXT: return $pop1{{$}}
+define i32 @sext_i16_i32(i16 *%p) {
+ %v = load atomic i16, i16* %p seq_cst, align 2
+ %e = sext i16 %v to i32
+ ret i32 %e
+}
+
+; CHECK-LABEL: zext_i16_i32:
+; CHECK: i32.atomic.load16_u $push0=, 0($0){{$}}
+; CHECK-NEXT: return $pop0{{$}}
+define i32 @zext_i16_i32(i16 *%p) {
+ %v = load atomic i16, i16* %p seq_cst, align 2
+ %e = zext i16 %v to i32
+ ret i32 %e
+}
+
+; CHECK-LABEL: sext_i8_i64:
+; CHECK: i64.atomic.load8_u $push0=, 0($0){{$}}
+; CHECK: i64.extend8_s $push1=, $pop0{{$}}
+; CHECK-NEXT: return $pop1{{$}}
+define i64 @sext_i8_i64(i8 *%p) {
+ %v = load atomic i8, i8* %p seq_cst, align 1
+ %e = sext i8 %v to i64
+ ret i64 %e
+}
+
+; CHECK-LABEL: zext_i8_i64:
+; CHECK: i64.atomic.load8_u $push0=, 0($0){{$}}
+; CHECK-NEXT: return $pop0{{$}}
+define i64 @zext_i8_i64(i8 *%p) {
+ %v = load atomic i8, i8* %p seq_cst, align 1
+ %e = zext i8 %v to i64
+ ret i64 %e
+}
+
+; CHECK-LABEL: sext_i16_i64:
+; CHECK: i64.atomic.load16_u $push0=, 0($0){{$}}
+; CHECK: i64.extend16_s $push1=, $pop0{{$}}
+; CHECK-NEXT: return $pop1{{$}}
+define i64 @sext_i16_i64(i16 *%p) {
+ %v = load atomic i16, i16* %p seq_cst, align 2
+ %e = sext i16 %v to i64
+ ret i64 %e
+}
+
+; CHECK-LABEL: zext_i16_i64:
+; CHECK: i64.atomic.load16_u $push0=, 0($0){{$}}
+; CHECK-NEXT: return $pop0{{$}}
+define i64 @zext_i16_i64(i16 *%p) {
+ %v = load atomic i16, i16* %p seq_cst, align 2
+ %e = zext i16 %v to i64
+ ret i64 %e
+}
+
+; CHECK-LABEL: sext_i32_i64:
+; CHECK: i32.atomic.load $push0=, 0($0){{$}}
+; CHECK: i64.extend_s/i32 $push1=, $pop0{{$}}
+; CHECK-NEXT: return $pop1{{$}}
+define i64 @sext_i32_i64(i32 *%p) {
+ %v = load atomic i32, i32* %p seq_cst, align 4
+ %e = sext i32 %v to i64
+ ret i64 %e
+}
+
+; CHECK-LABEL: zext_i32_i64:
+; CHECK: i64.atomic.load32_u $push0=, 0($0){{$}}
+; CHECK: return $pop0{{$}}
+define i64 @zext_i32_i64(i32 *%p) {
+ %v = load atomic i32, i32* %p seq_cst, align 4
+ %e = zext i32 %v to i64
+ ret i64 %e
+}
diff --git a/test/CodeGen/WebAssembly/offset-atomics.ll b/test/CodeGen/WebAssembly/offset-atomics.ll
new file mode 100644
index 00000000000..24727fc2608
--- /dev/null
+++ b/test/CodeGen/WebAssembly/offset-atomics.ll
@@ -0,0 +1,307 @@
+; RUN: not llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals -mattr=+atomics | FileCheck %s
+
+; Test that atomic loads are assembled properly.
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown-wasm"
+
+; CHECK-LABEL: load_i32_no_offset:
+; CHECK: i32.atomic.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i32 @load_i32_no_offset(i32 *%p) {
+ %v = load atomic i32, i32* %p seq_cst, align 4
+ ret i32 %v
+}
+
+; With an nuw add, we can fold an offset.
+
+; CHECK-LABEL: load_i32_with_folded_offset:
+; CHECK: i32.atomic.load $push0=, 24($0){{$}}
+define i32 @load_i32_with_folded_offset(i32* %p) {
+ %q = ptrtoint i32* %p to i32
+ %r = add nuw i32 %q, 24
+ %s = inttoptr i32 %r to i32*
+ %t = load atomic i32, i32* %s seq_cst, align 4
+ ret i32 %t
+}
+
+; With an inbounds gep, we can fold an offset.
+
+; CHECK-LABEL: load_i32_with_folded_gep_offset:
+; CHECK: i32.atomic.load $push0=, 24($0){{$}}
+define i32 @load_i32_with_folded_gep_offset(i32* %p) {
+ %s = getelementptr inbounds i32, i32* %p, i32 6
+ %t = load atomic i32, i32* %s seq_cst, align 4
+ ret i32 %t
+}
+
+; We can't fold a negative offset though, even with an inbounds gep.
+
+; CHECK-LABEL: load_i32_with_unfolded_gep_negative_offset:
+; CHECK: i32.const $push0=, -24{{$}}
+; CHECK: i32.add $push1=, $0, $pop0{{$}}
+; CHECK: i32.atomic.load $push2=, 0($pop1){{$}}
+define i32 @load_i32_with_unfolded_gep_negative_offset(i32* %p) {
+ %s = getelementptr inbounds i32, i32* %p, i32 -6
+ %t = load atomic i32, i32* %s seq_cst, align 4
+ ret i32 %t
+}
+
+; Without nuw, and even with nsw, we can't fold an offset.
+
+; CHECK-LABEL: load_i32_with_unfolded_offset:
+; CHECK: i32.const $push0=, 24{{$}}
+; CHECK: i32.add $push1=, $0, $pop0{{$}}
+; CHECK: i32.atomic.load $push2=, 0($pop1){{$}}
+define i32 @load_i32_with_unfolded_offset(i32* %p) {
+ %q = ptrtoint i32* %p to i32
+ %r = add nsw i32 %q, 24
+ %s = inttoptr i32 %r to i32*
+ %t = load atomic i32, i32* %s seq_cst, align 4
+ ret i32 %t
+}
+
+; Without inbounds, we can't fold a gep offset.
+
+; CHECK-LABEL: load_i32_with_unfolded_gep_offset:
+; CHECK: i32.const $push0=, 24{{$}}
+; CHECK: i32.add $push1=, $0, $pop0{{$}}
+; CHECK: i32.atomic.load $push2=, 0($pop1){{$}}
+define i32 @load_i32_with_unfolded_gep_offset(i32* %p) {
+ %s = getelementptr i32, i32* %p, i32 6
+ %t = load atomic i32, i32* %s seq_cst, align 4
+ ret i32 %t
+}
+
+; CHECK-LABEL: load_i64_no_offset:
+; CHECK: i64.atomic.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i64 @load_i64_no_offset(i64 *%p) {
+ %v = load atomic i64, i64* %p seq_cst, align 8
+ ret i64 %v
+}
+
+; Same as above but with i64.
+
+; CHECK-LABEL: load_i64_with_folded_offset:
+; CHECK: i64.atomic.load $push0=, 24($0){{$}}
+define i64 @load_i64_with_folded_offset(i64* %p) {
+ %q = ptrtoint i64* %p to i32
+ %r = add nuw i32 %q, 24
+ %s = inttoptr i32 %r to i64*
+ %t = load atomic i64, i64* %s seq_cst, align 8
+ ret i64 %t
+}
+
+; Same as above but with i64.
+
+; CHECK-LABEL: load_i64_with_folded_gep_offset:
+; CHECK: i64.atomic.load $push0=, 24($0){{$}}
+define i64 @load_i64_with_folded_gep_offset(i64* %p) {
+ %s = getelementptr inbounds i64, i64* %p, i32 3
+ %t = load atomic i64, i64* %s seq_cst, align 8
+ ret i64 %t
+}
+
+; Same as above but with i64.
+
+; CHECK-LABEL: load_i64_with_unfolded_gep_negative_offset:
+; CHECK: i32.const $push0=, -24{{$}}
+; CHECK: i32.add $push1=, $0, $pop0{{$}}
+; CHECK: i64.atomic.load $push2=, 0($pop1){{$}}
+define i64 @load_i64_with_unfolded_gep_negative_offset(i64* %p) {
+ %s = getelementptr inbounds i64, i64* %p, i32 -3
+ %t = load atomic i64, i64* %s seq_cst, align 8
+ ret i64 %t
+}
+
+; Same as above but with i64.
+
+; CHECK-LABEL: load_i64_with_unfolded_offset:
+; CHECK: i32.const $push0=, 24{{$}}
+; CHECK: i32.add $push1=, $0, $pop0{{$}}
+; CHECK: i64.atomic.load $push2=, 0($pop1){{$}}
+define i64 @load_i64_with_unfolded_offset(i64* %p) {
+ %q = ptrtoint i64* %p to i32
+ %r = add nsw i32 %q, 24
+ %s = inttoptr i32 %r to i64*
+ %t = load atomic i64, i64* %s seq_cst, align 8
+ ret i64 %t
+}
+
+; Same as above but with i64.
+
+; CHECK-LABEL: load_i64_with_unfolded_gep_offset:
+; CHECK: i32.const $push0=, 24{{$}}
+; CHECK: i32.add $push1=, $0, $pop0{{$}}
+; CHECK: i64.atomic.load $push2=, 0($pop1){{$}}
+define i64 @load_i64_with_unfolded_gep_offset(i64* %p) {
+ %s = getelementptr i64, i64* %p, i32 3
+ %t = load atomic i64, i64* %s seq_cst, align 8
+ ret i64 %t
+}
+
+; CHECK-LABEL: load_i32_with_folded_or_offset:
+; CHECK: i32.atomic.load8_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}){{$}}
+; CHECK-NEXT: i32.extend8_s $push{{[0-9]+}}=, $pop[[R1]]{{$}}
+define i32 @load_i32_with_folded_or_offset(i32 %x) {
+ %and = and i32 %x, -4
+ %t0 = inttoptr i32 %and to i8*
+ %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
+ %t1 = load atomic i8, i8* %arrayidx seq_cst, align 8
+ %conv = sext i8 %t1 to i32
+ ret i32 %conv
+}
+
+; When loading from a fixed address, materialize a zero.
+
+; CHECK-LABEL: load_i32_from_numeric_address
+; CHECK: i32.const $push0=, 0{{$}}
+; CHECK: i32.atomic.load $push1=, 42($pop0){{$}}
+define i32 @load_i32_from_numeric_address() {
+ %s = inttoptr i32 42 to i32*
+ %t = load atomic i32, i32* %s seq_cst, align 4
+ ret i32 %t
+}
+
+
+; CHECK-LABEL: load_i32_from_global_address
+; CHECK: i32.const $push0=, 0{{$}}
+; CHECK: i32.atomic.load $push1=, gv($pop0){{$}}
+@gv = global i32 0
+define i32 @load_i32_from_global_address() {
+ %t = load atomic i32, i32* @gv seq_cst, align 4
+ ret i32 %t
+}
+
+; Fold an offset into a sign-extending load.
+
+; CHECK-LABEL: load_i8_s_with_folded_offset:
+; CHECK: i32.atomic.load8_u $push0=, 24($0){{$}}
+; CHECK-NEXT: i32.extend8_s $push1=, $pop0
+define i32 @load_i8_s_with_folded_offset(i8* %p) {
+ %q = ptrtoint i8* %p to i32
+ %r = add nuw i32 %q, 24
+ %s = inttoptr i32 %r to i8*
+ %t = load atomic i8, i8* %s seq_cst, align 1
+ %u = sext i8 %t to i32
+ ret i32 %u
+}
+
+; Fold a gep offset into a sign-extending load.
+
+; CHECK-LABEL: load_i8_s_with_folded_gep_offset:
+; CHECK: i32.atomic.load8_u $push0=, 24($0){{$}}
+; CHECK-NEXT: i32.extend8_s $push1=, $pop0
+define i32 @load_i8_s_with_folded_gep_offset(i8* %p) {
+ %s = getelementptr inbounds i8, i8* %p, i32 24
+ %t = load atomic i8, i8* %s seq_cst, align 1
+ %u = sext i8 %t to i32
+ ret i32 %u
+}
+
+; CHECK-LABEL: load_i16_s_i64_with_folded_gep_offset:
+; CHECK: i64.atomic.load16_u $push0=, 6($0){{$}}
+define i64 @load_i16_s_i64_with_folded_gep_offset(i16* %p) {
+ %s = getelementptr inbounds i16, i16* %p, i32 3
+ %t = load atomic i16, i16* %s seq_cst, align 2
+ %u = zext i16 %t to i64
+ ret i64 %u
+}
+
+; CHECK-LABEL: load_i64_with_folded_or_offset:
+; CHECK: i64.atomic.load8_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}){{$}}
+; CHECK-NEXT: i64.extend8_s $push{{[0-9]+}}=, $pop[[R1]]{{$}}
+define i64 @load_i64_with_folded_or_offset(i32 %x) {
+ %and = and i32 %x, -4
+ %t0 = inttoptr i32 %and to i8*
+ %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
+ %t1 = load atomic i8, i8* %arrayidx seq_cst, align 8
+ %conv = sext i8 %t1 to i64
+ ret i64 %conv
+}
+
+
+; Fold an offset into a zero-extending load.
+
+; CHECK-LABEL: load_i16_u_with_folded_offset:
+; CHECK: i32.atomic.load16_u $push0=, 24($0){{$}}
+define i32 @load_i16_u_with_folded_offset(i8* %p) {
+ %q = ptrtoint i8* %p to i32
+ %r = add nuw i32 %q, 24
+ %s = inttoptr i32 %r to i16*
+ %t = load atomic i16, i16* %s seq_cst, align 2
+ %u = zext i16 %t to i32
+ ret i32 %u
+}
+
+; Fold a gep offset into a zero-extending load.
+
+; CHECK-LABEL: load_i8_u_with_folded_gep_offset:
+; CHECK: i32.atomic.load8_u $push0=, 24($0){{$}}
+define i32 @load_i8_u_with_folded_gep_offset(i8* %p) {
+ %s = getelementptr inbounds i8, i8* %p, i32 24
+ %t = load atomic i8, i8* %s seq_cst, align 1
+ %u = zext i8 %t to i32
+ ret i32 %u
+}
+
+
+; When loading from a fixed address, materialize a zero.
+; As above but with extending load.
+
+; CHECK-LABEL: load_zext_i32_from_numeric_address
+; CHECK: i32.const $push0=, 0{{$}}
+; CHECK: i32.atomic.load16_u $push1=, 42($pop0){{$}}
+define i32 @load_zext_i32_from_numeric_address() {
+ %s = inttoptr i32 42 to i16*
+ %t = load atomic i16, i16* %s seq_cst, align 2
+ %u = zext i16 %t to i32
+ ret i32 %u
+}
+
+; CHECK-LABEL: load_sext_i32_from_global_address
+; CHECK: i32.const $push0=, 0{{$}}
+; CHECK: i32.atomic.load8_u $push1=, gv8($pop0){{$}}
+; CHECK-NEXT: i32.extend8_s $push2=, $pop1{{$}}
+@gv8 = global i8 0
+define i32 @load_sext_i32_from_global_address() {
+ %t = load atomic i8, i8* @gv8 seq_cst, align 1
+ %u = sext i8 %t to i32
+ ret i32 %u
+}
+
+; Fold an offset into a sign-extending load.
+; As above but 32 extended to 64 bit.
+; CHECK-LABEL: load_i32_i64_s_with_folded_offset:
+; CHECK: i32.atomic.load $push0=, 24($0){{$}}
+; CHECK-NEXT: i64.extend_s/i32 $push1=, $pop0{{$}}
+define i64 @load_i32_i64_s_with_folded_offset(i32* %p) {
+ %q = ptrtoint i32* %p to i32
+ %r = add nuw i32 %q, 24
+ %s = inttoptr i32 %r to i32*
+ %t = load atomic i32, i32* %s seq_cst, align 4
+ %u = sext i32 %t to i64
+ ret i64 %u
+}
+
+; Fold a gep offset into a zero-extending load.
+; As above but 32 extended to 64 bit.
+; CHECK-LABEL: load_i32_i64_u_with_folded_gep_offset:
+; CHECK: i64.atomic.load32_u $push0=, 96($0){{$}}
+define i64 @load_i32_i64_u_with_folded_gep_offset(i32* %p) {
+ %s = getelementptr inbounds i32, i32* %p, i32 24
+ %t = load atomic i32, i32* %s seq_cst, align 4
+ %u = zext i32 %t to i64
+ ret i64 %u
+}
+
+; i8 return value should test anyext loads
+; CHECK-LABEL: ldi8_a1:
+; CHECK: i32.atomic.load8_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i8 @ldi8_a1(i8 *%p) {
+ %v = load atomic i8, i8* %p seq_cst, align 1
+ ret i8 %v
+}