author     Chris Lattner <sabre@nondot.org>    2009-07-08 00:46:57 +0000
committer  Chris Lattner <sabre@nondot.org>    2009-07-08 00:46:57 +0000
commit     b2773e1adaee4a61b64ccbfa26c5ed9d8687d83b (patch)
tree       3d30d5c339da9da85b78a2c78dd0e32aa01ab077 /test/CodeGen/ARM/vicmp.ll
parent     2b7a271c713ff1db83990f691126bc33d6c59b52 (diff)
Change these tests to use [fi]cmp+sext instead of v[fi]cmp. No functionality change.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@74979 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/ARM/vicmp.ll')
-rw-r--r--  test/CodeGen/ARM/vicmp.ll  52
1 file changed, 31 insertions, 21 deletions
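The pattern of the change, taken from the first function in the diff below: each vicmp is replaced by an icmp that produces an <N x i1> mask, followed by a sext back to the original element type, so the tests still return all-ones/all-zeros vectors:

    ; before
    %tmp3 = vicmp ne <8 x i8> %tmp1, %tmp2
    ret <8 x i8> %tmp3

    ; after
    %tmp3 = icmp ne <8 x i8> %tmp1, %tmp2
    %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
    ret <8 x i8> %tmp4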
diff --git a/test/CodeGen/ARM/vicmp.ll b/test/CodeGen/ARM/vicmp.ll
index 86858f92934..2cb3eba7423 100644
--- a/test/CodeGen/ARM/vicmp.ll
+++ b/test/CodeGen/ARM/vicmp.ll
@@ -8,7 +8,7 @@
; RUN: grep {vcgt\\.u16} %t | count 1
; RUN: grep {vcge\\.u32} %t | count 1
-; This tests vicmp operations that do not map directly to NEON instructions.
+; This tests icmp operations that do not map directly to NEON instructions.
; Not-equal (ne) operations are implemented by VCEQ/VMVN. Less-than (lt/ult)
; and less-than-or-equal (le/ule) are implemented by swapping the arguments
; to VCGT and VCGE. Test all the operand types for not-equal but only sample
@@ -17,69 +17,79 @@
define <8 x i8> @vcnei8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
- %tmp3 = vicmp ne <8 x i8> %tmp1, %tmp2
- ret <8 x i8> %tmp3
+ %tmp3 = icmp ne <8 x i8> %tmp1, %tmp2
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
}
define <4 x i16> @vcnei16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
- %tmp3 = vicmp ne <4 x i16> %tmp1, %tmp2
- ret <4 x i16> %tmp3
+ %tmp3 = icmp ne <4 x i16> %tmp1, %tmp2
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
}
define <2 x i32> @vcnei32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
- %tmp3 = vicmp ne <2 x i32> %tmp1, %tmp2
- ret <2 x i32> %tmp3
+ %tmp3 = icmp ne <2 x i32> %tmp1, %tmp2
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
}
define <16 x i8> @vcneQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
- %tmp3 = vicmp ne <16 x i8> %tmp1, %tmp2
- ret <16 x i8> %tmp3
+ %tmp3 = icmp ne <16 x i8> %tmp1, %tmp2
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
}
define <8 x i16> @vcneQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
- %tmp3 = vicmp ne <8 x i16> %tmp1, %tmp2
- ret <8 x i16> %tmp3
+ %tmp3 = icmp ne <8 x i16> %tmp1, %tmp2
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
}
define <4 x i32> @vcneQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
- %tmp3 = vicmp ne <4 x i32> %tmp1, %tmp2
- ret <4 x i32> %tmp3
+ %tmp3 = icmp ne <4 x i32> %tmp1, %tmp2
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
}
define <16 x i8> @vcltQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
- %tmp3 = vicmp slt <16 x i8> %tmp1, %tmp2
- ret <16 x i8> %tmp3
+ %tmp3 = icmp slt <16 x i8> %tmp1, %tmp2
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
}
define <4 x i16> @vcles16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
- %tmp3 = vicmp sle <4 x i16> %tmp1, %tmp2
- ret <4 x i16> %tmp3
+ %tmp3 = icmp sle <4 x i16> %tmp1, %tmp2
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
}
define <4 x i16> @vcltu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
- %tmp3 = vicmp ult <4 x i16> %tmp1, %tmp2
- ret <4 x i16> %tmp3
+ %tmp3 = icmp ult <4 x i16> %tmp1, %tmp2
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
}
define <4 x i32> @vcleQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
- %tmp3 = vicmp ule <4 x i32> %tmp1, %tmp2
- ret <4 x i32> %tmp3
+ %tmp3 = icmp ule <4 x i32> %tmp1, %tmp2
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
}
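As the updated comment in the test notes, ne compares are expected to select as VCEQ followed by VMVN, while the lt/le forms reuse VCGT/VCGE with their operands swapped. A minimal sketch of the swapped-operand case, using a hypothetical function name (@vclt_sketch) and the lowering described by that comment (not verified against actual codegen here):

    define <4 x i16> @vclt_sketch(<4 x i16> %a, <4 x i16> %b) nounwind {
      ; "a < b" has no direct NEON compare; per the comment above it is
      ; expected to become a VCGT (vcgt.s16) with the operands swapped, i.e. "b > a"
      %cmp = icmp slt <4 x i16> %a, %b
      %mask = sext <4 x i1> %cmp to <4 x i16>
      ret <4 x i16> %mask
    }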