author    Bob Wilson <bob.wilson@apple.com>    2010-09-01 23:50:19 +0000
committer Bob Wilson <bob.wilson@apple.com>    2010-09-01 23:50:19 +0000
commit    d0b69cf1198dadbb7bdfc385334b67f60f756539 (patch)
tree      a859ccd0b1fbc03df8dd39a7a8d3b0805fd6ae8e /test/CodeGen/ARM/vmla.ll
parent    a2259e17b7c657ddeb0c3a03a5ca291ca4e0dc7e (diff)
Remove NEON vmull, vmlal, and vmlsl intrinsics, replacing them with multiply,
add, and subtract operations with zero-extended or sign-extended vectors.
Update tests. Add auto-upgrade support for the old intrinsics.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@112773 91177308-0d34-0410-b5e6-96231b3b80d8
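In IR terms, each removed intrinsic call becomes an explicit widen-multiply-accumulate sequence that the ARM backend pattern-matches back to a single NEON instruction. A minimal sketch of the signed <4 x i16> to <4 x i32> case (value names here are illustrative, not taken from the test):

    ; before: one opaque intrinsic call
    %r = call <4 x i32> @llvm.arm.neon.vmlals.v4i32(<4 x i32> %acc, <4 x i16> %a, <4 x i16> %b)

    ; after: sign-extend both operands, multiply, then accumulate;
    ; codegen still emits a single vmlal.s16 (the same pattern with
    ; zext is matched to vmlal.u16 instead)
    %a.w = sext <4 x i16> %a to <4 x i32>
    %b.w = sext <4 x i16> %b to <4 x i32>
    %mul = mul <4 x i32> %a.w, %b.w
    %r   = add <4 x i32> %acc, %mul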
Diffstat (limited to 'test/CodeGen/ARM/vmla.ll')
-rw-r--r--  test/CodeGen/ARM/vmla.ll | 78
1 file changed, 50 insertions(+), 28 deletions(-)
diff --git a/test/CodeGen/ARM/vmla.ll b/test/CodeGen/ARM/vmla.ll
index 77cf10ad3e6..9c6b210be79 100644
--- a/test/CodeGen/ARM/vmla.ll
+++ b/test/CodeGen/ARM/vmla.ll
@@ -94,8 +94,11 @@ define <8 x i16> @vmlals8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i8>* %B
%tmp3 = load <8 x i8>* %C
- %tmp4 = call <8 x i16> @llvm.arm.neon.vmlals.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2, <8 x i8> %tmp3)
- ret <8 x i16> %tmp4
+ %tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
+ %tmp5 = sext <8 x i8> %tmp3 to <8 x i16>
+ %tmp6 = mul <8 x i16> %tmp4, %tmp5
+ %tmp7 = add <8 x i16> %tmp1, %tmp6
+ ret <8 x i16> %tmp7
}
define <4 x i32> @vmlals16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
@@ -104,8 +107,11 @@ define <4 x i32> @vmlals16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i16>* %B
%tmp3 = load <4 x i16>* %C
- %tmp4 = call <4 x i32> @llvm.arm.neon.vmlals.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
- ret <4 x i32> %tmp4
+ %tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
+ %tmp5 = sext <4 x i16> %tmp3 to <4 x i32>
+ %tmp6 = mul <4 x i32> %tmp4, %tmp5
+ %tmp7 = add <4 x i32> %tmp1, %tmp6
+ ret <4 x i32> %tmp7
}
define <2 x i64> @vmlals32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
@@ -114,8 +120,11 @@ define <2 x i64> @vmlals32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i32>* %B
%tmp3 = load <2 x i32>* %C
- %tmp4 = call <2 x i64> @llvm.arm.neon.vmlals.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
- ret <2 x i64> %tmp4
+ %tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
+ %tmp5 = sext <2 x i32> %tmp3 to <2 x i64>
+ %tmp6 = mul <2 x i64> %tmp4, %tmp5
+ %tmp7 = add <2 x i64> %tmp1, %tmp6
+ ret <2 x i64> %tmp7
}
define <8 x i16> @vmlalu8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
@@ -124,8 +133,11 @@ define <8 x i16> @vmlalu8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i8>* %B
%tmp3 = load <8 x i8>* %C
- %tmp4 = call <8 x i16> @llvm.arm.neon.vmlalu.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2, <8 x i8> %tmp3)
- ret <8 x i16> %tmp4
+ %tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
+ %tmp5 = zext <8 x i8> %tmp3 to <8 x i16>
+ %tmp6 = mul <8 x i16> %tmp4, %tmp5
+ %tmp7 = add <8 x i16> %tmp1, %tmp6
+ ret <8 x i16> %tmp7
}
define <4 x i32> @vmlalu16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
@@ -134,8 +146,11 @@ define <4 x i32> @vmlalu16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i16>* %B
%tmp3 = load <4 x i16>* %C
- %tmp4 = call <4 x i32> @llvm.arm.neon.vmlalu.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
- ret <4 x i32> %tmp4
+ %tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
+ %tmp5 = zext <4 x i16> %tmp3 to <4 x i32>
+ %tmp6 = mul <4 x i32> %tmp4, %tmp5
+ %tmp7 = add <4 x i32> %tmp1, %tmp6
+ ret <4 x i32> %tmp7
}
define <2 x i64> @vmlalu32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
@@ -144,8 +159,11 @@ define <2 x i64> @vmlalu32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i32>* %B
%tmp3 = load <2 x i32>* %C
- %tmp4 = call <2 x i64> @llvm.arm.neon.vmlalu.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
- ret <2 x i64> %tmp4
+ %tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
+ %tmp5 = zext <2 x i32> %tmp3 to <2 x i64>
+ %tmp6 = mul <2 x i64> %tmp4, %tmp5
+ %tmp7 = add <2 x i64> %tmp1, %tmp6
+ ret <2 x i64> %tmp7
}
define arm_aapcs_vfpcc <4 x i32> @test_vmlal_lanes16(<4 x i32> %arg0_int32x4_t, <4 x i16> %arg1_int16x4_t, <4 x i16> %arg2_int16x4_t) nounwind readnone {
@@ -153,8 +171,11 @@ entry:
; CHECK: test_vmlal_lanes16
; CHECK: vmlal.s16 q0, d2, d3[1]
%0 = shufflevector <4 x i16> %arg2_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
- %1 = tail call <4 x i32> @llvm.arm.neon.vmlals.v4i32(<4 x i32> %arg0_int32x4_t, <4 x i16> %arg1_int16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %1
+ %1 = sext <4 x i16> %arg1_int16x4_t to <4 x i32>
+ %2 = sext <4 x i16> %0 to <4 x i32>
+ %3 = mul <4 x i32> %1, %2
+ %4 = add <4 x i32> %arg0_int32x4_t, %3
+ ret <4 x i32> %4
}
define arm_aapcs_vfpcc <2 x i64> @test_vmlal_lanes32(<2 x i64> %arg0_int64x2_t, <2 x i32> %arg1_int32x2_t, <2 x i32> %arg2_int32x2_t) nounwind readnone {
@@ -162,8 +183,11 @@ entry:
; CHECK: test_vmlal_lanes32
; CHECK: vmlal.s32 q0, d2, d3[1]
%0 = shufflevector <2 x i32> %arg2_int32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
- %1 = tail call <2 x i64> @llvm.arm.neon.vmlals.v2i64(<2 x i64> %arg0_int64x2_t, <2 x i32> %arg1_int32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %1
+ %1 = sext <2 x i32> %arg1_int32x2_t to <2 x i64>
+ %2 = sext <2 x i32> %0 to <2 x i64>
+ %3 = mul <2 x i64> %1, %2
+ %4 = add <2 x i64> %arg0_int64x2_t, %3
+ ret <2 x i64> %4
}
define arm_aapcs_vfpcc <4 x i32> @test_vmlal_laneu16(<4 x i32> %arg0_uint32x4_t, <4 x i16> %arg1_uint16x4_t, <4 x i16> %arg2_uint16x4_t) nounwind readnone {
@@ -171,8 +195,11 @@ entry:
; CHECK: test_vmlal_laneu16
; CHECK: vmlal.u16 q0, d2, d3[1]
%0 = shufflevector <4 x i16> %arg2_uint16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
- %1 = tail call <4 x i32> @llvm.arm.neon.vmlalu.v4i32(<4 x i32> %arg0_uint32x4_t, <4 x i16> %arg1_uint16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %1
+ %1 = zext <4 x i16> %arg1_uint16x4_t to <4 x i32>
+ %2 = zext <4 x i16> %0 to <4 x i32>
+ %3 = mul <4 x i32> %1, %2
+ %4 = add <4 x i32> %arg0_uint32x4_t, %3
+ ret <4 x i32> %4
}
define arm_aapcs_vfpcc <2 x i64> @test_vmlal_laneu32(<2 x i64> %arg0_uint64x2_t, <2 x i32> %arg1_uint32x2_t, <2 x i32> %arg2_uint32x2_t) nounwind readnone {
@@ -180,14 +207,9 @@ entry:
; CHECK: test_vmlal_laneu32
; CHECK: vmlal.u32 q0, d2, d3[1]
%0 = shufflevector <2 x i32> %arg2_uint32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
- %1 = tail call <2 x i64> @llvm.arm.neon.vmlalu.v2i64(<2 x i64> %arg0_uint64x2_t, <2 x i32> %arg1_uint32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %1
+ %1 = zext <2 x i32> %arg1_uint32x2_t to <2 x i64>
+ %2 = zext <2 x i32> %0 to <2 x i64>
+ %3 = mul <2 x i64> %1, %2
+ %4 = add <2 x i64> %arg0_uint64x2_t, %3
+ ret <2 x i64> %4
}
-
-declare <8 x i16> @llvm.arm.neon.vmlals.v8i16(<8 x i16>, <8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vmlals.v4i32(<4 x i32>, <4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vmlals.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vmlalu.v8i16(<8 x i16>, <8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vmlalu.v4i32(<4 x i32>, <4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vmlalu.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone
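The vmlsl intrinsics mentioned in the commit message are handled the same way in their own test file (presumably the companion vmls.ll, which is not part of this diff): the widened product is subtracted from the accumulator rather than added. A sketch under that assumption:

    ; vmlsl counterpart: same widening idiom, but subtract the product
    %a.w = sext <4 x i16> %a to <4 x i32>
    %b.w = sext <4 x i16> %b to <4 x i32>
    %mul = mul <4 x i32> %a.w, %b.w
    %r   = sub <4 x i32> %acc, %mul   ; matched back to vmlsl.s16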