author     Matt Arsenault <Matthew.Arsenault@amd.com>    2017-11-06 17:04:37 +0000
committer  Matt Arsenault <Matthew.Arsenault@amd.com>    2017-11-06 17:04:37 +0000
commit     bd04b64cd125c715b1a31e5312f2d6d48d8e41f3 (patch)
tree       4422efd04347d313d2418a04c1173b1358d8e23a /test
parent     94bed7d968791da7617005c356fa939edefe9a1c (diff)
AMDGPU: Select v_mad_u64_u32 and v_mad_i64_i32
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317492 91177308-0d34-0410-b5e6-96231b3b80d8
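
This change teaches instruction selection to form v_mad_i64_i32 / v_mad_u64_u32 when a 64-bit multiply of two 32-bit values that were sign- or zero-extended to 64 bits feeds a 64-bit add. In rough terms, the matched pattern looks like the sketch below (illustrative function name, mirroring the tests added in this patch); on CI (Sea Islands) and newer targets the whole sequence becomes a single mad, while SI still expands it into v_mul_lo/v_mul_hi plus add/addc, as the new checks expect.

define i64 @mad_i64_i32_sketch(i32 %a, i32 %b, i64 %c) {
  %ext.a = sext i32 %a to i64         ; zext here selects v_mad_u64_u32 instead
  %ext.b = sext i32 %b to i64
  %mul = mul i64 %ext.a, %ext.b       ; 32 x 32 -> 64-bit multiply
  %mad = add i64 %mul, %c             ; folded into the mad's 64-bit addend
  ret i64 %mad
}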
Diffstat (limited to 'test')
-rw-r--r--  test/CodeGen/AMDGPU/mad_64_32.ll  168
-rw-r--r--  test/CodeGen/AMDGPU/mul.ll        125
2 files changed, 239 insertions, 54 deletions
diff --git a/test/CodeGen/AMDGPU/mad_64_32.ll b/test/CodeGen/AMDGPU/mad_64_32.ll
new file mode 100644
index 00000000000..b4d9d928101
--- /dev/null
+++ b/test/CodeGen/AMDGPU/mad_64_32.ll
@@ -0,0 +1,168 @@
+; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,CI %s
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI %s
+
+; GCN-LABEL: {{^}}mad_i64_i32_sextops:
+; CI: v_mad_i64_i32 v[0:1], s[6:7], v0, v1, v[2:3]
+
+; SI: v_mul_lo_i32
+; SI: v_mul_hi_i32
+; SI: v_add_i32
+; SI: v_addc_u32
+define i64 @mad_i64_i32_sextops(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
+ %sext0 = sext i32 %arg0 to i64
+ %sext1 = sext i32 %arg1 to i64
+ %mul = mul i64 %sext0, %sext1
+ %mad = add i64 %mul, %arg2
+ ret i64 %mad
+}
+
+; GCN-LABEL: {{^}}mad_i64_i32_sextops_commute:
+; CI: v_mad_i64_i32 v[0:1], s[6:7], v0, v1, v[2:3]
+
+; SI-DAG: v_mul_lo_i32
+; SI-DAG: v_mul_hi_i32
+; SI: v_add_i32
+; SI: v_addc_u32
+define i64 @mad_i64_i32_sextops_commute(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
+ %sext0 = sext i32 %arg0 to i64
+ %sext1 = sext i32 %arg1 to i64
+ %mul = mul i64 %sext0, %sext1
+ %mad = add i64 %arg2, %mul
+ ret i64 %mad
+}
+
+; GCN-LABEL: {{^}}mad_u64_u32_zextops:
+; CI: v_mad_u64_u32 v[0:1], s[6:7], v0, v1, v[2:3]
+
+; SI-DAG: v_mul_lo_i32
+; SI-DAG: v_mul_hi_u32
+; SI: v_add_i32
+; SI: v_addc_u32
+define i64 @mad_u64_u32_zextops(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
+ %sext0 = zext i32 %arg0 to i64
+ %sext1 = zext i32 %arg1 to i64
+ %mul = mul i64 %sext0, %sext1
+ %mad = add i64 %mul, %arg2
+ ret i64 %mad
+}
+
+; GCN-LABEL: {{^}}mad_u64_u32_zextops_commute:
+; CI: v_mad_u64_u32 v[0:1], s[6:7], v0, v1, v[2:3]
+
+; SI-DAG: v_mul_lo_i32
+; SI-DAG: v_mul_hi_u32
+; SI: v_add_i32
+; SI: v_addc_u32
+define i64 @mad_u64_u32_zextops_commute(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
+ %sext0 = zext i32 %arg0 to i64
+ %sext1 = zext i32 %arg1 to i64
+ %mul = mul i64 %sext0, %sext1
+ %mad = add i64 %arg2, %mul
+ ret i64 %mad
+}
+
+
+
+
+
+
+; GCN-LABEL: {{^}}mad_i64_i32_sextops_i32_i128:
+; CI: v_mad_u64_u32
+; CI: v_mad_u64_u32
+; CI: v_mad_u64_u32
+; CI: v_mad_i64_i32
+
+; SI-NOT: v_mad_
+define i128 @mad_i64_i32_sextops_i32_i128(i32 %arg0, i32 %arg1, i128 %arg2) #0 {
+ %sext0 = sext i32 %arg0 to i128
+ %sext1 = sext i32 %arg1 to i128
+ %mul = mul i128 %sext0, %sext1
+ %mad = add i128 %mul, %arg2
+ ret i128 %mad
+}
+
+; GCN-LABEL: {{^}}mad_i64_i32_sextops_i32_i63:
+; CI: v_lshl_b64
+; CI: v_ashr
+; CI: v_mad_i64_i32 v[0:1], s[6:7], v0, v1, v[2:3]
+
+; SI-NOT: v_mad_u64_u32
+define i63 @mad_i64_i32_sextops_i32_i63(i32 %arg0, i32 %arg1, i63 %arg2) #0 {
+ %sext0 = sext i32 %arg0 to i63
+ %sext1 = sext i32 %arg1 to i63
+ %mul = mul i63 %sext0, %sext1
+ %mad = add i63 %mul, %arg2
+ ret i63 %mad
+}
+
+; GCN-LABEL: {{^}}mad_i64_i32_sextops_i31_i63:
+; CI: v_lshl_b64
+; CI: v_ashr_i64
+; CI: v_bfe_i32 v1, v1, 0, 31
+; CI: v_bfe_i32 v0, v0, 0, 31
+; CI: v_mad_i64_i32 v[0:1], s[6:7], v0, v1, v[2:3]
+define i63 @mad_i64_i32_sextops_i31_i63(i31 %arg0, i31 %arg1, i63 %arg2) #0 {
+ %sext0 = sext i31 %arg0 to i63
+ %sext1 = sext i31 %arg1 to i63
+ %mul = mul i63 %sext0, %sext1
+ %mad = add i63 %mul, %arg2
+ ret i63 %mad
+}
+
+; GCN-LABEL: {{^}}mad_u64_u32_bitops:
+; CI: v_mad_u64_u32 v[0:1], s[6:7], v0, v2, v[4:5]
+define i64 @mad_u64_u32_bitops(i64 %arg0, i64 %arg1, i64 %arg2) #0 {
+ %trunc.lhs = and i64 %arg0, 4294967295
+ %trunc.rhs = and i64 %arg1, 4294967295
+ %mul = mul i64 %trunc.lhs, %trunc.rhs
+ %add = add i64 %mul, %arg2
+ ret i64 %add
+}
+
+; GCN-LABEL: {{^}}mad_u64_u32_bitops_lhs_mask_small:
+; GCN-NOT: v_mad_
+define i64 @mad_u64_u32_bitops_lhs_mask_small(i64 %arg0, i64 %arg1, i64 %arg2) #0 {
+ %trunc.lhs = and i64 %arg0, 8589934591
+ %trunc.rhs = and i64 %arg1, 4294967295
+ %mul = mul i64 %trunc.lhs, %trunc.rhs
+ %add = add i64 %mul, %arg2
+ ret i64 %add
+}
+
+; GCN-LABEL: {{^}}mad_u64_u32_bitops_rhs_mask_small:
+; GCN-NOT: v_mad_
+define i64 @mad_u64_u32_bitops_rhs_mask_small(i64 %arg0, i64 %arg1, i64 %arg2) #0 {
+ %trunc.lhs = and i64 %arg0, 4294967295
+ %trunc.rhs = and i64 %arg1, 8589934591
+ %mul = mul i64 %trunc.lhs, %trunc.rhs
+ %add = add i64 %mul, %arg2
+ ret i64 %add
+}
+
+; GCN-LABEL: {{^}}mad_i64_i32_bitops:
+; CI: v_mad_i64_i32 v[0:1], s[6:7], v0, v2, v[4:5]
+; SI-NOT: v_mad_
+define i64 @mad_i64_i32_bitops(i64 %arg0, i64 %arg1, i64 %arg2) #0 {
+ %shl.lhs = shl i64 %arg0, 32
+ %trunc.lhs = ashr i64 %shl.lhs, 32
+ %shl.rhs = shl i64 %arg1, 32
+ %trunc.rhs = ashr i64 %shl.rhs, 32
+ %mul = mul i64 %trunc.lhs, %trunc.rhs
+ %add = add i64 %mul, %arg2
+ ret i64 %add
+}
+
+; Example from bug report
+; GCN-LABEL: {{^}}mad_i64_i32_unpack_i64ops:
+; CI: v_mad_u64_u32 v[0:1], s[6:7], v1, v0, v[0:1]
+; SI-NOT: v_mad_u64_u32
+define i64 @mad_i64_i32_unpack_i64ops(i64 %arg0) #0 {
+ %tmp4 = lshr i64 %arg0, 32
+ %tmp5 = and i64 %arg0, 4294967295
+ %mul = mul nuw i64 %tmp4, %tmp5
+ %mad = add i64 %mul, %arg0
+ ret i64 %mad
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone speculatable }
diff --git a/test/CodeGen/AMDGPU/mul.ll b/test/CodeGen/AMDGPU/mul.ll
index a0290789175..8839fcdffbc 100644
--- a/test/CodeGen/AMDGPU/mul.ll
+++ b/test/CodeGen/AMDGPU/mul.ll
@@ -1,6 +1,6 @@
-; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG %s -check-prefix=FUNC
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI,FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI,FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -check-prefixes=EG,FUNC %s
; mul24 and mad24 are affected
@@ -8,8 +8,8 @@
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; SI: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-; SI: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; GCN: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; GCN: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define amdgpu_kernel void @test_mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
@@ -26,10 +26,10 @@ define amdgpu_kernel void @test_mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; SI: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-; SI: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-; SI: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-; SI: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; GCN: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; GCN: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; GCN: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; GCN: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define amdgpu_kernel void @v_mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
@@ -41,10 +41,10 @@ define amdgpu_kernel void @v_mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> a
}
; FUNC-LABEL: {{^}}s_trunc_i64_mul_to_i32:
-; SI: s_load_dword
-; SI: s_load_dword
-; SI: s_mul_i32
-; SI: buffer_store_dword
+; GCN: s_load_dword
+; GCN: s_load_dword
+; GCN: s_mul_i32
+; GCN: buffer_store_dword
define amdgpu_kernel void @s_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
%mul = mul i64 %b, %a
%trunc = trunc i64 %mul to i32
@@ -53,10 +53,10 @@ define amdgpu_kernel void @s_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 %a
}
; FUNC-LABEL: {{^}}v_trunc_i64_mul_to_i32:
-; SI: s_load_dword
-; SI: s_load_dword
-; SI: v_mul_lo_i32
-; SI: buffer_store_dword
+; GCN: s_load_dword
+; GCN: s_load_dword
+; GCN: v_mul_lo_i32
+; GCN: buffer_store_dword
define amdgpu_kernel void @v_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
%a = load i64, i64 addrspace(1)* %aptr, align 8
%b = load i64, i64 addrspace(1)* %bptr, align 8
@@ -71,8 +71,8 @@ define amdgpu_kernel void @v_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 ad
; FUNC-LABEL: {{^}}mul64_sext_c:
; EG-DAG: MULLO_INT
; EG-DAG: MULHI_INT
-; SI-DAG: s_mul_i32
-; SI-DAG: v_mul_hi_i32
+; GCN-DAG: s_mul_i32
+; GCN-DAG: v_mul_hi_i32
define amdgpu_kernel void @mul64_sext_c(i64 addrspace(1)* %out, i32 %in) {
entry:
%0 = sext i32 %in to i64
@@ -84,9 +84,9 @@ entry:
; FUNC-LABEL: {{^}}v_mul64_sext_c:
; EG-DAG: MULLO_INT
; EG-DAG: MULHI_INT
-; SI-DAG: v_mul_lo_i32
-; SI-DAG: v_mul_hi_i32
-; SI: s_endpgm
+; GCN-DAG: v_mul_lo_i32
+; GCN-DAG: v_mul_hi_i32
+; GCN: s_endpgm
define amdgpu_kernel void @v_mul64_sext_c(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
%val = load i32, i32 addrspace(1)* %in, align 4
%ext = sext i32 %val to i64
@@ -96,9 +96,9 @@ define amdgpu_kernel void @v_mul64_sext_c(i64 addrspace(1)* %out, i32 addrspace(
}
; FUNC-LABEL: {{^}}v_mul64_sext_inline_imm:
-; SI-DAG: v_mul_lo_i32 v{{[0-9]+}}, v{{[0-9]+}}, 9
-; SI-DAG: v_mul_hi_i32 v{{[0-9]+}}, v{{[0-9]+}}, 9
-; SI: s_endpgm
+; GCN-DAG: v_mul_lo_i32 v{{[0-9]+}}, v{{[0-9]+}}, 9
+; GCN-DAG: v_mul_hi_i32 v{{[0-9]+}}, v{{[0-9]+}}, 9
+; GCN: s_endpgm
define amdgpu_kernel void @v_mul64_sext_inline_imm(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
%val = load i32, i32 addrspace(1)* %in, align 4
%ext = sext i32 %val to i64
@@ -108,12 +108,12 @@ define amdgpu_kernel void @v_mul64_sext_inline_imm(i64 addrspace(1)* %out, i32 a
}
; FUNC-LABEL: {{^}}s_mul_i32:
-; SI: s_load_dword [[SRC0:s[0-9]+]],
-; SI: s_load_dword [[SRC1:s[0-9]+]],
-; SI: s_mul_i32 [[SRESULT:s[0-9]+]], [[SRC0]], [[SRC1]]
-; SI: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
-; SI: buffer_store_dword [[VRESULT]],
-; SI: s_endpgm
+; GCN: s_load_dword [[SRC0:s[0-9]+]],
+; GCN: s_load_dword [[SRC1:s[0-9]+]],
+; GCN: s_mul_i32 [[SRESULT:s[0-9]+]], [[SRC0]], [[SRC1]]
+; GCN: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
+; GCN: buffer_store_dword [[VRESULT]],
+; GCN: s_endpgm
define amdgpu_kernel void @s_mul_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%mul = mul i32 %a, %b
store i32 %mul, i32 addrspace(1)* %out, align 4
@@ -121,7 +121,7 @@ define amdgpu_kernel void @s_mul_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nou
}
; FUNC-LABEL: {{^}}v_mul_i32:
-; SI: v_mul_lo_i32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_mul_lo_i32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32, i32 addrspace(1)* %in
@@ -146,7 +146,7 @@ define amdgpu_kernel void @s_mul_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nou
}
; FUNC-LABEL: {{^}}v_mul_i64:
-; SI: v_mul_lo_i32
+; GCN: v_mul_lo_i32
define amdgpu_kernel void @v_mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
%a = load i64, i64 addrspace(1)* %aptr, align 8
%b = load i64, i64 addrspace(1)* %bptr, align 8
@@ -156,7 +156,7 @@ define amdgpu_kernel void @v_mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %
}
; FUNC-LABEL: {{^}}mul32_in_branch:
-; SI: s_mul_i32
+; GCN: s_mul_i32
define amdgpu_kernel void @mul32_in_branch(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %a, i32 %b, i32 %c) {
entry:
%0 = icmp eq i32 %a, 0
@@ -177,9 +177,9 @@ endif:
}
; FUNC-LABEL: {{^}}mul64_in_branch:
-; SI-DAG: s_mul_i32
-; SI-DAG: v_mul_hi_u32
-; SI: s_endpgm
+; GCN-DAG: s_mul_i32
+; GCN-DAG: v_mul_hi_u32
+; GCN: s_endpgm
define amdgpu_kernel void @mul64_in_branch(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b, i64 %c) {
entry:
%0 = icmp eq i64 %a, 0
@@ -201,29 +201,41 @@ endif:
; FIXME: Load dwordx4
; FUNC-LABEL: {{^}}s_mul_i128:
-; SI: s_load_dwordx2
-; SI: s_load_dwordx2
-; SI: s_load_dwordx2
-; SI: s_load_dwordx2
+; GCN: s_load_dwordx2
+; GCN: s_load_dwordx2
+; GCN: s_load_dwordx2
+; GCN: s_load_dwordx2
; SI: v_mul_hi_u32
; SI: v_mul_hi_u32
; SI: s_mul_i32
; SI: v_mul_hi_u32
; SI: s_mul_i32
+
; SI-DAG: s_mul_i32
; SI-DAG: v_mul_hi_u32
; SI-DAG: v_mul_hi_u32
; SI-DAG: s_mul_i32
; SI-DAG: s_mul_i32
; SI-DAG: v_mul_hi_u32
+
; SI: s_mul_i32
; SI: s_mul_i32
; SI: s_mul_i32
; SI: s_mul_i32
; SI: s_mul_i32
-; SI: buffer_store_dwordx4
+
+; VI: s_mul_i32
+; VI: s_mul_i32
+; VI: v_mul_hi_u32
+; VI: v_mul_hi_u32
+; VI: v_mad_u64_u32
+; VI: v_mad_u64_u32
+; VI: v_mad_u64_u32
+
+
+; GCN: buffer_store_dwordx4
define amdgpu_kernel void @s_mul_i128(i128 addrspace(1)* %out, i128 %a, i128 %b) nounwind #0 {
%mul = mul i128 %a, %b
store i128 %mul, i128 addrspace(1)* %out
@@ -231,18 +243,19 @@ define amdgpu_kernel void @s_mul_i128(i128 addrspace(1)* %out, i128 %a, i128 %b)
}
; FUNC-LABEL: {{^}}v_mul_i128:
-; SI: {{buffer|flat}}_load_dwordx4
-; SI: {{buffer|flat}}_load_dwordx4
-; SI-DAG: v_mul_lo_i32
-; SI-DAG: v_mul_hi_u32
-; SI-DAG: v_mul_hi_u32
-; SI-DAG: v_mul_lo_i32
-; SI-DAG: v_mul_hi_u32
-; SI-DAG: v_mul_hi_u32
-; SI-DAG: v_mul_lo_i32
-; SI-DAG: v_mul_lo_i32
-; SI: v_add_i32_e32
+; GCN: {{buffer|flat}}_load_dwordx4
+; GCN: {{buffer|flat}}_load_dwordx4
+
+; GCN-DAG: v_mul_lo_i32
+; GCN-DAG: v_mul_hi_u32
+; GCN-DAG: v_mul_hi_u32
+; GCN-DAG: v_mul_lo_i32
+; GCN-DAG: v_mul_hi_u32
+; GCN-DAG: v_mul_hi_u32
+; GCN-DAG: v_mul_lo_i32
+; GCN-DAG: v_mul_lo_i32
+; GCN: v_add_i32_e32
; SI-DAG: v_mul_hi_u32
; SI-DAG: v_mul_lo_i32
; SI-DAG: v_mul_hi_u32
@@ -252,7 +265,11 @@ define amdgpu_kernel void @s_mul_i128(i128 addrspace(1)* %out, i128 %a, i128 %b)
; SI-DAG: v_mul_lo_i32
; SI-DAG: v_mul_lo_i32
-; SI: {{buffer|flat}}_store_dwordx4
+; VI: v_mad_u64_u32
+; VI: v_mad_u64_u32
+; VI: v_mad_u64_u32
+
+; GCN: {{buffer|flat}}_store_dwordx4
define amdgpu_kernel void @v_mul_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %aptr, i128 addrspace(1)* %bptr) #0 {
%tid = call i32 @llvm.r600.read.tidig.x()
%gep.a = getelementptr inbounds i128, i128 addrspace(1)* %aptr, i32 %tid