; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 -O0 | FileCheck %s

; Regression test: lowering of <16 x i64> vector selects + shufflevector at -O0
; with AVX2 (512-bit vectors split across four YMM registers each; the 5th
; <16 x i64> argument and the tails of args 2/3 arrive on the stack, hence the
; rbp-relative vmovaps loads in the CHECK body).
;
; FIXME(review): this file appears to have been damaged in transit — the
; angle-bracketed constant operands of the three `select` instructions (the
; <16 x i1> condition vectors) and the <16 x i32> mask of the `shufflevector`
; have been stripped, so the IR below will NOT parse as-is. The original lane
; masks cannot be reliably reconstructed from the CHECK lines alone; restore
; them from the upstream copy of this test before running it.

define <16 x i64> @pluto(<16 x i64> %arg, <16 x i64> %arg1, <16 x i64> %arg2, <16 x i64> %arg3, <16 x i64> %arg4) {
; CHECK-LABEL: pluto:
; CHECK: # %bb.0: # %bb
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbp, -16
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $352, %rsp # imm = 0x160
; CHECK-NEXT: vmovaps 240(%rbp), %ymm8
; CHECK-NEXT: vmovaps 208(%rbp), %ymm9
; CHECK-NEXT: vmovaps 176(%rbp), %ymm10
; CHECK-NEXT: vmovaps 144(%rbp), %ymm11
; CHECK-NEXT: vmovaps 112(%rbp), %ymm12
; CHECK-NEXT: vmovaps 80(%rbp), %ymm13
; CHECK-NEXT: vmovaps 48(%rbp), %ymm14
; CHECK-NEXT: vmovaps 16(%rbp), %ymm15
; CHECK-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,18446744071562067968,18446744071562067968]
; CHECK-NEXT: vblendvpd %ymm0, %ymm2, %ymm6, %ymm0
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpblendd {{.*#+}} ymm6 = ymm2[0,1],ymm13[2,3],ymm2[4,5,6,7]
; CHECK-NEXT: vpblendd {{.*#+}} ymm8 = ymm2[0,1],ymm8[2,3,4,5,6,7]
; CHECK-NEXT: vmovaps {{.*#+}} ymm13 = [18446744071562067968,18446744071562067968,0,0]
; CHECK-NEXT: vblendvpd %ymm13, %ymm9, %ymm6, %ymm6
; CHECK-NEXT: vpblendd {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm11[4,5],ymm0[6,7]
; CHECK-NEXT: vpermq {{.*#+}} ymm9 = ymm9[3,2,2,1]
; CHECK-NEXT: vmovaps %xmm6, %xmm11
; CHECK-NEXT: # implicit-def: %ymm13
; CHECK-NEXT: vinserti128 $1, %xmm11, %ymm13, %ymm13
; CHECK-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm13[4,5],ymm9[6,7]
; CHECK-NEXT: vmovaps %xmm0, %xmm11
; CHECK-NEXT: # implicit-def: %ymm0
; CHECK-NEXT: vinserti128 $1, %xmm11, %ymm0, %ymm0
; CHECK-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm7[4,5],ymm8[6,7]
; CHECK-NEXT: vpermq {{.*#+}} ymm13 = ymm8[2,0,2,3]
; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm0[4,5,6,7]
; CHECK-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5],ymm7[6,7]
; CHECK-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,0,2,3]
; CHECK-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm5[4,5],ymm6[6,7]
; CHECK-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,1,2]
; CHECK-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7]
; CHECK-NEXT: vpermq {{.*#+}} ymm6 = ymm8[2,1,1,3]
; CHECK-NEXT: vpshufd {{.*#+}} ymm5 = ymm5[0,1,0,1,4,5,4,5]
; CHECK-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6,7]
; CHECK-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
; CHECK-NEXT: vmovaps %ymm9, %ymm0
; CHECK-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) # 32-byte Spill
; CHECK-NEXT: vmovaps %ymm5, %ymm1
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm5 # 32-byte Reload
; CHECK-NEXT: vmovaps %ymm2, {{[0-9]+}}(%rsp) # 32-byte Spill
; CHECK-NEXT: vmovaps %ymm5, %ymm2
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm6 # 32-byte Reload
; CHECK-NEXT: vmovaps %ymm3, {{[0-9]+}}(%rsp) # 32-byte Spill
; CHECK-NEXT: vmovaps %ymm6, %ymm3
; CHECK-NEXT: vmovaps %ymm15, {{[0-9]+}}(%rsp) # 32-byte Spill
; CHECK-NEXT: vmovaps %ymm12, {{[0-9]+}}(%rsp) # 32-byte Spill
; CHECK-NEXT: vmovaps %ymm10, {{[0-9]+}}(%rsp) # 32-byte Spill
; CHECK-NEXT: vmovaps %ymm4, {{[0-9]+}}(%rsp) # 32-byte Spill
; CHECK-NEXT: vmovaps %ymm14, (%rsp) # 32-byte Spill
; CHECK-NEXT: movq %rbp, %rsp
; CHECK-NEXT: popq %rbp
; CHECK-NEXT: retq
bb:
  ; FIXME(review): the <16 x i1> condition-vector constant between `select` and
  ; the first value operand is missing on each of the three selects below
  ; (stripped during extraction) — the IR is invalid until it is restored.
  ; blend of %arg / %arg1, per-lane mask lost
  %tmp = select <16 x i1> , <16 x i64> %arg, <16 x i64> %arg1
  ; lanes of %arg2 kept where the (lost) mask is true, zero elsewhere
  %tmp5 = select <16 x i1> , <16 x i64> %arg2, <16 x i64> zeroinitializer
  ; layers %arg3 over the partially-zeroed %tmp5, per-lane mask lost
  %tmp6 = select <16 x i1> , <16 x i64> %arg3, <16 x i64> %tmp5
  ; FIXME(review): the <16 x i32> shuffle-mask constant is likewise missing here.
  %tmp7 = shufflevector <16 x i64> %tmp, <16 x i64> %tmp6, <16 x i32> 
  ret <16 x i64> %tmp7
}