author    Hans Wennborg <hans@hanshq.net>    2018-01-30 10:30:33 +0000
committer Hans Wennborg <hans@hanshq.net>    2018-01-30 10:30:33 +0000
commit    f8f8b9b531ff0dc0f39b6259e288aca9824c80a2 (patch)
tree      6f95076dbae6eafc80c4b65780f41d332311cab6
parent    e57fcaa435b9843091f04661ac55f813601396ad (diff)
Merging r323672: (test-case re-generated)
------------------------------------------------------------------------
r323672 | ctopper | 2018-01-29 18:56:57 +0100 (Mon, 29 Jan 2018) | 5 lines

[X86] Don't create SHRUNKBLEND when the condition is used by the true or false operand of the vselect.

Fixes PR34592.

Differential Revision: https://reviews.llvm.org/D42628
------------------------------------------------------------------------

git-svn-id: https://llvm.org/svn/llvm-project/llvm/branches/release_60@323743 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp  |  5
-rw-r--r--  test/CodeGen/X86/pr34592.ll         | 74
2 files changed, 77 insertions, 2 deletions
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index e7d9334abe1..3a163637da2 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -31776,9 +31776,10 @@ static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
// Check all uses of the condition operand to check whether it will be
// consumed by non-BLEND instructions. Those may require that all bits
// are set properly.
- for (SDNode *U : Cond->uses()) {
+ for (SDNode::use_iterator UI = Cond->use_begin(), UE = Cond->use_end();
+ UI != UE; ++UI) {
// TODO: Add other opcodes eventually lowered into BLEND.
- if (U->getOpcode() != ISD::VSELECT)
+ if (UI->getOpcode() != ISD::VSELECT || UI.getOperandNo() != 0)
return SDValue();
}
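
The hunk above tightens the guard in combineSelect: switching from the range-based uses() loop to an explicit SDNode::use_iterator makes getOperandNo() available, so the combine can now require that the condition value is only ever the mask operand (operand 0) of an ISD::VSELECT. If the condition also feeds a true/false data operand, all of its bits are observable, so shrinking it to sign bits via SHRUNKBLEND would be unsafe. A minimal sketch restating that check as a standalone predicate, assuming LLVM's SelectionDAG headers; the helper name is made up for illustration and is not part of the patch:

#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
using namespace llvm;

// Return true only if every use of Cond is the mask operand (operand 0)
// of an ISD::VSELECT node.
static bool allUsesAreVselectMasks(SDValue Cond) {
  for (SDNode::use_iterator UI = Cond->use_begin(), UE = Cond->use_end();
       UI != UE; ++UI) {
    // UI dereferences to the user node; getOperandNo() identifies which
    // of that user's operands is Cond.
    if (UI->getOpcode() != ISD::VSELECT || UI.getOperandNo() != 0)
      return false;
  }
  return true;
}

The regenerated test below exercises exactly that case: selects whose mask vectors are reused through later selects and a shufflevector, the pattern reported in PR34592.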
diff --git a/test/CodeGen/X86/pr34592.ll b/test/CodeGen/X86/pr34592.ll
new file mode 100644
index 00000000000..09dfade1ee5
--- /dev/null
+++ b/test/CodeGen/X86/pr34592.ll
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 -O0 | FileCheck %s
+
+define <16 x i64> @pluto(<16 x i64> %arg, <16 x i64> %arg1, <16 x i64> %arg2, <16 x i64> %arg3, <16 x i64> %arg4) {
+; CHECK-LABEL: pluto:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: pushq %rbp
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset %rbp, -16
+; CHECK-NEXT: movq %rsp, %rbp
+; CHECK-NEXT: .cfi_def_cfa_register %rbp
+; CHECK-NEXT: andq $-32, %rsp
+; CHECK-NEXT: subq $352, %rsp # imm = 0x160
+; CHECK-NEXT: vmovaps 240(%rbp), %ymm8
+; CHECK-NEXT: vmovaps 208(%rbp), %ymm9
+; CHECK-NEXT: vmovaps 176(%rbp), %ymm10
+; CHECK-NEXT: vmovaps 144(%rbp), %ymm11
+; CHECK-NEXT: vmovaps 112(%rbp), %ymm12
+; CHECK-NEXT: vmovaps 80(%rbp), %ymm13
+; CHECK-NEXT: vmovaps 48(%rbp), %ymm14
+; CHECK-NEXT: vmovaps 16(%rbp), %ymm15
+; CHECK-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
+; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,18446744071562067968,18446744071562067968]
+; CHECK-NEXT: vblendvpd %ymm0, %ymm2, %ymm6, %ymm0
+; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: vpblendd {{.*#+}} ymm6 = ymm2[0,1],ymm13[2,3],ymm2[4,5,6,7]
+; CHECK-NEXT: vpblendd {{.*#+}} ymm8 = ymm2[0,1],ymm8[2,3,4,5,6,7]
+; CHECK-NEXT: vmovaps {{.*#+}} ymm13 = [18446744071562067968,18446744071562067968,0,0]
+; CHECK-NEXT: vblendvpd %ymm13, %ymm9, %ymm6, %ymm6
+; CHECK-NEXT: vpblendd {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm11[4,5],ymm0[6,7]
+; CHECK-NEXT: vpermq {{.*#+}} ymm9 = ymm9[3,2,2,1]
+; CHECK-NEXT: vmovaps %xmm6, %xmm11
+; CHECK-NEXT: # implicit-def: %ymm13
+; CHECK-NEXT: vinserti128 $1, %xmm11, %ymm13, %ymm13
+; CHECK-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm13[4,5],ymm9[6,7]
+; CHECK-NEXT: vmovaps %xmm0, %xmm11
+; CHECK-NEXT: # implicit-def: %ymm0
+; CHECK-NEXT: vinserti128 $1, %xmm11, %ymm0, %ymm0
+; CHECK-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm7[4,5],ymm8[6,7]
+; CHECK-NEXT: vpermq {{.*#+}} ymm13 = ymm8[2,0,2,3]
+; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm0[4,5,6,7]
+; CHECK-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5],ymm7[6,7]
+; CHECK-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,0,2,3]
+; CHECK-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm5[4,5],ymm6[6,7]
+; CHECK-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,1,2]
+; CHECK-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7]
+; CHECK-NEXT: vpermq {{.*#+}} ymm6 = ymm8[2,1,1,3]
+; CHECK-NEXT: vpshufd {{.*#+}} ymm5 = ymm5[0,1,0,1,4,5,4,5]
+; CHECK-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6,7]
+; CHECK-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
+; CHECK-NEXT: vmovaps %ymm9, %ymm0
+; CHECK-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) # 32-byte Spill
+; CHECK-NEXT: vmovaps %ymm5, %ymm1
+; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm5 # 32-byte Reload
+; CHECK-NEXT: vmovaps %ymm2, {{[0-9]+}}(%rsp) # 32-byte Spill
+; CHECK-NEXT: vmovaps %ymm5, %ymm2
+; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm6 # 32-byte Reload
+; CHECK-NEXT: vmovaps %ymm3, {{[0-9]+}}(%rsp) # 32-byte Spill
+; CHECK-NEXT: vmovaps %ymm6, %ymm3
+; CHECK-NEXT: vmovaps %ymm15, {{[0-9]+}}(%rsp) # 32-byte Spill
+; CHECK-NEXT: vmovaps %ymm12, {{[0-9]+}}(%rsp) # 32-byte Spill
+; CHECK-NEXT: vmovaps %ymm10, {{[0-9]+}}(%rsp) # 32-byte Spill
+; CHECK-NEXT: vmovaps %ymm4, {{[0-9]+}}(%rsp) # 32-byte Spill
+; CHECK-NEXT: vmovaps %ymm14, (%rsp) # 32-byte Spill
+; CHECK-NEXT: movq %rbp, %rsp
+; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: retq
+bb:
+ %tmp = select <16 x i1> <i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false>, <16 x i64> %arg, <16 x i64> %arg1
+ %tmp5 = select <16 x i1> <i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <16 x i64> %arg2, <16 x i64> zeroinitializer
+ %tmp6 = select <16 x i1> <i1 false, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true>, <16 x i64> %arg3, <16 x i64> %tmp5
+ %tmp7 = shufflevector <16 x i64> %tmp, <16 x i64> %tmp6, <16 x i32> <i32 11, i32 18, i32 24, i32 9, i32 14, i32 29, i32 29, i32 6, i32 14, i32 28, i32 8, i32 9, i32 22, i32 12, i32 25, i32 6>
+ ret <16 x i64> %tmp7
+}