author    Tom Stellard <tstellar@redhat.com>    2018-10-19 19:56:57 +0000
committer Tom Stellard <tstellar@redhat.com>    2018-10-19 19:56:57 +0000
commit    888225e7beebbdd82f6b6c7e139c2b094ea4faaa (patch)
tree      a7fd4999fb93520867a0469e35fab42e9ceb6b1a /test
parent    b2504eaa1be0a2dea9c88da68f1cfe43c6853792 (diff)
Merging r343373:
------------------------------------------------------------------------
r343373 | rksimon | 2018-09-29 06:25:22 -0700 (Sat, 29 Sep 2018) | 3 lines

[X86][SSE] Fixed issue with v2i64 variable shifts on 32-bit targets

The shift amount might have peeked through an extract_subvector, altering the
number of vector elements in the 'Amt' variable - so we were incorrectly
calculating the ratio when peeking through bitcasts, resulting in incorrectly
detecting splats.
------------------------------------------------------------------------

git-svn-id: https://llvm.org/svn/llvm-project/llvm/branches/release_70@344810 91177308-0d34-0410-b5e6-96231b3b80d8
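The ratio mix-up the message describes can be illustrated with a small standalone C++ sketch. This is not LLVM's lowering code: isSplatThroughBitcast and its inputs are hypothetical, and it only models how grouping the i32 elements of a bitcast shift amount with the wrong ratio turns the non-splat v2i64 amount <33, 63> (materialized as [33,0,63,0], the constant that appears in the updated CHECK lines below) into a false splat.

#include <cstdint>
#include <iostream>
#include <vector>

// Returns true if every group of 'Ratio' consecutive narrow elements matches
// the first group, i.e. the wide-element view of the vector is a splat.
static bool isSplatThroughBitcast(const std::vector<uint32_t> &NarrowElts,
                                  unsigned Ratio) {
  if (Ratio == 0 || NarrowElts.size() % Ratio != 0)
    return false;
  for (size_t I = Ratio; I < NarrowElts.size(); ++I)
    if (NarrowElts[I] != NarrowElts[I % Ratio])
      return false;
  return true;
}

int main() {
  // v2i64 shift amounts <33, 63> stored as the v4i32 constant [33,0,63,0].
  std::vector<uint32_t> Amt = {33, 0, 63, 0};

  // Correct ratio for viewing i32 elements as i64 lanes: 64 / 32 = 2.
  // The amounts differ per lane, so this is not a splat.
  std::cout << isSplatThroughBitcast(Amt, 2) << '\n'; // prints 0

  // A ratio derived from a stale element count (here 4) groups all four i32
  // elements into one lane and reports a splat -- the kind of
  // misclassification the fix avoids.
  std::cout << isSplatThroughBitcast(Amt, 4) << '\n'; // prints 1
}

When the amount is wrongly classified as a splat, both i64 lanes are shifted by the first amount (a single vpsrlq), which is exactly what the old CHECK lines below encode; the fixed codegen shifts each lane by its own amount and blends the results.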
Diffstat (limited to 'test')
-rw-r--r--   test/CodeGen/X86/known-signbits-vector.ll   29
1 file changed, 18 insertions, 11 deletions
diff --git a/test/CodeGen/X86/known-signbits-vector.ll b/test/CodeGen/X86/known-signbits-vector.ll
index d243616fc45..1e48f8683c3 100644
--- a/test/CodeGen/X86/known-signbits-vector.ll
+++ b/test/CodeGen/X86/known-signbits-vector.ll
@@ -381,19 +381,26 @@ define <4 x float> @signbits_ashr_sext_select_shuffle_sitofp(<4 x i64> %a0, <4 x
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-16, %esp
; X32-NEXT: subl $16, %esp
+; X32-NEXT: vmovdqa {{.*#+}} xmm3 = [33,0,63,0]
+; X32-NEXT: vmovdqa {{.*#+}} xmm4 = [0,2147483648,0,2147483648]
+; X32-NEXT: vpsrlq %xmm3, %xmm4, %xmm5
+; X32-NEXT: vpshufd {{.*#+}} xmm6 = xmm3[2,3,0,1]
+; X32-NEXT: vpsrlq %xmm6, %xmm4, %xmm4
+; X32-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
+; X32-NEXT: vextractf128 $1, %ymm2, %xmm5
+; X32-NEXT: vpsrlq %xmm6, %xmm5, %xmm7
+; X32-NEXT: vpsrlq %xmm3, %xmm5, %xmm5
+; X32-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm7[4,5,6,7]
+; X32-NEXT: vpsrlq %xmm6, %xmm2, %xmm6
+; X32-NEXT: vpsrlq %xmm3, %xmm2, %xmm2
+; X32-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4,5,6,7]
; X32-NEXT: vpmovsxdq 16(%ebp), %xmm3
+; X32-NEXT: vpxor %xmm4, %xmm5, %xmm5
+; X32-NEXT: vpsubq %xmm4, %xmm5, %xmm5
+; X32-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; X32-NEXT: vpsubq %xmm4, %xmm2, %xmm2
; X32-NEXT: vpmovsxdq 8(%ebp), %xmm4
-; X32-NEXT: vmovdqa {{.*#+}} xmm5 = [33,0,63,0]
-; X32-NEXT: vmovdqa {{.*#+}} xmm6 = [0,2147483648,0,2147483648]
-; X32-NEXT: vpsrlq %xmm5, %xmm6, %xmm6
-; X32-NEXT: vextractf128 $1, %ymm2, %xmm7
-; X32-NEXT: vpsrlq %xmm5, %xmm7, %xmm7
-; X32-NEXT: vpxor %xmm6, %xmm7, %xmm7
-; X32-NEXT: vpsubq %xmm6, %xmm7, %xmm7
-; X32-NEXT: vpsrlq %xmm5, %xmm2, %xmm2
-; X32-NEXT: vpxor %xmm6, %xmm2, %xmm2
-; X32-NEXT: vpsubq %xmm6, %xmm2, %xmm2
-; X32-NEXT: vinsertf128 $1, %xmm7, %ymm2, %ymm2
+; X32-NEXT: vinsertf128 $1, %xmm5, %ymm2, %ymm2
; X32-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
; X32-NEXT: vextractf128 $1, %ymm1, %xmm4
; X32-NEXT: vextractf128 $1, %ymm0, %xmm5