; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2

; fold (sdiv undef, x) -> 0
define <4 x i32> @combine_vec_sdiv_undef0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_undef0:
; SSE:       # %bb.0:
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sdiv_undef0:
; AVX:       # %bb.0:
; AVX-NEXT:    retq
  %1 = sdiv <4 x i32> undef, %x
  ret <4 x i32> %1
}

; fold (sdiv x, undef) -> undef
define <4 x i32> @combine_vec_sdiv_undef1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_undef1:
; SSE:       # %bb.0:
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sdiv_undef1:
; AVX:       # %bb.0:
; AVX-NEXT:    retq
  %1 = sdiv <4 x i32> %x, undef
  ret <4 x i32> %1
}

; fold (sdiv x, 1) -> x
define <4 x i32> @combine_vec_sdiv_by_one(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_one:
; SSE:       # %bb.0:
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sdiv_by_one:
; AVX:       # %bb.0:
; AVX-NEXT:    retq
  %1 = sdiv <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %1
}

; fold (sdiv x, -1) -> 0 - x
define <4 x i32> @combine_vec_sdiv_by_negone(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_negone:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    psubd %xmm0, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sdiv_by_negone:
; AVX:       # %bb.0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %1 = sdiv <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
  ret <4 x i32> %1
}

; fold (sdiv x, y) -> (udiv x, y) iff x and y are positive
define <4 x i32> @combine_vec_sdiv_by_pos0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pos0:
; SSE:       # %bb.0:
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    psrld $2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sdiv_by_pos0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpsrld $2, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
  %2 = sdiv <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_sdiv_by_pos1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pos1:
; SSE:       # %bb.0:
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrld $3, %xmm1
; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    psrld $4, %xmm0
; SSE-NEXT:    psrld $2, %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pos1:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT:    vpsrld $4, %xmm0, %xmm1
; AVX1-NEXT:    vpsrld $2, %xmm0, %xmm2
; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT:    vpsrld $3, %xmm0, %xmm2
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT:    retq
;
; AVX2-LABEL: combine_vec_sdiv_by_pos1:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
  %2 = sdiv <4 x i32> %1, <i32 1, i32 4, i32 8, i32 16>
  ret <4 x i32> %2
}
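
; The two power-of-two tests below rely on the standard signed-division
; expansion for a divisor (1 << c):
;   %t = add %x, ((%x >>s 31) >>u (32 - c))   ; bias negative values by (1 << c) - 1
;   %r = %t >>s c
; A worked sketch (hand-computed, not autogenerated output): for %x = -7, c = 2:
;   -7 >>s 31 = -1;  -1 >>u 30 = 3;  -7 + 3 = -4;  -4 >>s 2 = -1 = sdiv(-7, 4).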

; fold (sdiv x, (1 << c)) -> (x + bias) >>s c
define <4 x i32> @combine_vec_sdiv_by_pow2a(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pow2a:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrad $31, %xmm1
; SSE-NEXT:    psrld $30, %xmm1
; SSE-NEXT:    paddd %xmm0, %xmm1
; SSE-NEXT:    psrad $2, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sdiv_by_pow2a:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrad $31, %xmm0, %xmm1
; AVX-NEXT:    vpsrld $30, %xmm1, %xmm1
; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpsrad $2, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = sdiv <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %1
}

define <4 x i32> @combine_vec_sdiv_by_pow2b(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pow2b:
; SSE:       # %bb.0:
; SSE-NEXT:    pextrd $1, %xmm0, %eax
; SSE-NEXT:    movl %eax, %ecx
; SSE-NEXT:    sarl $31, %ecx
; SSE-NEXT:    shrl $30, %ecx
; SSE-NEXT:    addl %eax, %ecx
; SSE-NEXT:    sarl $2, %ecx
; SSE-NEXT:    pextrd $2, %xmm0, %eax
; SSE-NEXT:    pextrd $3, %xmm0, %edx
; SSE-NEXT:    pinsrd $1, %ecx, %xmm0
; SSE-NEXT:    movl %eax, %ecx
; SSE-NEXT:    sarl $31, %ecx
; SSE-NEXT:    shrl $29, %ecx
; SSE-NEXT:    addl %eax, %ecx
; SSE-NEXT:    sarl $3, %ecx
; SSE-NEXT:    pinsrd $2, %ecx, %xmm0
; SSE-NEXT:    movl %edx, %eax
; SSE-NEXT:    sarl $31, %eax
; SSE-NEXT:    shrl $28, %eax
; SSE-NEXT:    addl %edx, %eax
; SSE-NEXT:    sarl $4, %eax
; SSE-NEXT:    pinsrd $3, %eax, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sdiv_by_pow2b:
; AVX:       # %bb.0:
; AVX-NEXT:    vpextrd $1, %xmm0, %eax
; AVX-NEXT:    movl %eax, %ecx
; AVX-NEXT:    sarl $31, %ecx
; AVX-NEXT:    shrl $30, %ecx
; AVX-NEXT:    addl %eax, %ecx
; AVX-NEXT:    sarl $2, %ecx
; AVX-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm1
; AVX-NEXT:    vpextrd $2, %xmm0, %eax
; AVX-NEXT:    movl %eax, %ecx
; AVX-NEXT:    sarl $31, %ecx
; AVX-NEXT:    shrl $29, %ecx
; AVX-NEXT:    addl %eax, %ecx
; AVX-NEXT:    sarl $3, %ecx
; AVX-NEXT:    vpinsrd $2, %ecx, %xmm1, %xmm1
; AVX-NEXT:    vpextrd $3, %xmm0, %eax
; AVX-NEXT:    movl %eax, %ecx
; AVX-NEXT:    sarl $31, %ecx
; AVX-NEXT:    shrl $28, %ecx
; AVX-NEXT:    addl %eax, %ecx
; AVX-NEXT:    sarl $4, %ecx
; AVX-NEXT:    vpinsrd $3, %ecx, %xmm1, %xmm0
; AVX-NEXT:    retq
  %1 = sdiv <4 x i32> %x, <i32 1, i32 4, i32 8, i32 16>
  ret <4 x i32> %1
}
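
; In combine_vec_sdiv_by_pow2b the divisors <1, 4, 8, 16> differ per lane, so
; (at the time these checks were generated) the lowering scalarizes: each lane
; is extracted with pextrd/vpextrd, adjusted with the same sarl/shrl/addl/sarl
; bias-and-shift sequence, and reinserted with pinsrd/vpinsrd. Lane 0 divides
; by 1 and passes through unmodified.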