diff options
author | Hans Wennborg <hans@hanshq.net> | 2018-01-18 11:37:05 +0000 |
---|---|---|
committer | Hans Wennborg <hans@hanshq.net> | 2018-01-18 11:37:05 +0000 |
commit | aa06dbe86af4ac06759ceaede59813f2606770f3 (patch) | |
tree | ca339dcef2f9efbe8808c27cfb7d0ed377ae00d0 /test/CodeGen | |
parent | 6079bf56feb869e1d033bd4c51d8f4cbd31ec25a (diff) |
Merging r322644:
------------------------------------------------------------------------
r322644 | d0k | 2018-01-17 05:01:06 -0800 (Wed, 17 Jan 2018) | 7 lines
[X86] Don't mutate shuffle arguments after early-out for AVX512
The match* functions have the annoying behavior of modifying their inputs.
Save and restore the inputs, just in case the early out for AVX512 is
hit. This is still not great, and it's only a matter of time before this kind of
bug happens again, but I couldn't come up with a better pattern without
rewriting significant chunks of this code. Fixes PR35977.
------------------------------------------------------------------------
git-svn-id: https://llvm.org/svn/llvm-project/llvm/branches/release_60@322840 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen')
-rw-r--r-- | test/CodeGen/X86/avx512-shuffles/partial_permute.ll | 39 |
1 file changed, 39 insertions, 0 deletions
diff --git a/test/CodeGen/X86/avx512-shuffles/partial_permute.ll b/test/CodeGen/X86/avx512-shuffles/partial_permute.ll index 333efb04913..1a483355319 100644 --- a/test/CodeGen/X86/avx512-shuffles/partial_permute.ll +++ b/test/CodeGen/X86/avx512-shuffles/partial_permute.ll @@ -4780,3 +4780,42 @@ define <2 x double> @test_masked_z_8xdouble_to_2xdouble_perm_mem_mask1(<8 x doub ret <2 x double> %res } +; PR35977 +define void @test_zext_v8i8_to_v8i16(<8 x i8>* %arg, <8 x i16>* %arg1) { +; CHECK-LABEL: test_zext_v8i8_to_v8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero +; CHECK-NEXT: vpsllw $8, %xmm0, %xmm0 +; CHECK-NEXT: vmovdqa %xmm0, (%rsi) +; CHECK-NEXT: retq + %tmp = getelementptr <8 x i8>, <8 x i8>* %arg, i32 0 + %tmp2 = load <8 x i8>, <8 x i8>* %tmp + %tmp3 = extractelement <8 x i8> %tmp2, i32 0 + %tmp4 = zext i8 %tmp3 to i16 + %tmp5 = insertelement <8 x i16> undef, i16 %tmp4, i32 0 + %tmp6 = extractelement <8 x i8> %tmp2, i32 1 + %tmp7 = zext i8 %tmp6 to i16 + %tmp8 = insertelement <8 x i16> %tmp5, i16 %tmp7, i32 1 + %tmp9 = extractelement <8 x i8> %tmp2, i32 2 + %tmp10 = zext i8 %tmp9 to i16 + %tmp11 = insertelement <8 x i16> %tmp8, i16 %tmp10, i32 2 + %tmp12 = extractelement <8 x i8> %tmp2, i32 3 + %tmp13 = zext i8 %tmp12 to i16 + %tmp14 = insertelement <8 x i16> %tmp11, i16 %tmp13, i32 3 + %tmp15 = extractelement <8 x i8> %tmp2, i32 4 + %tmp16 = zext i8 %tmp15 to i16 + %tmp17 = insertelement <8 x i16> %tmp14, i16 %tmp16, i32 4 + %tmp18 = extractelement <8 x i8> %tmp2, i32 5 + %tmp19 = zext i8 %tmp18 to i16 + %tmp20 = insertelement <8 x i16> %tmp17, i16 %tmp19, i32 5 + %tmp21 = extractelement <8 x i8> %tmp2, i32 6 + %tmp22 = zext i8 %tmp21 to i16 + %tmp23 = insertelement <8 x i16> %tmp20, i16 %tmp22, i32 6 + %tmp24 = extractelement <8 x i8> %tmp2, i32 7 + %tmp25 = zext i8 %tmp24 to i16 + %tmp26 = insertelement <8 x i16> %tmp23, i16 %tmp25, i32 7 + 
%tmp27 = shl <8 x i16> %tmp26, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8> + %tmp28 = getelementptr <8 x i16>, <8 x i16>* %arg1, i32 0 + store <8 x i16> %tmp27, <8 x i16>* %tmp28 + ret void +} |