Diffstat (limited to 'test/CodeGen/X86/vector-trunc-math.ll')
-rw-r--r--   test/CodeGen/X86/vector-trunc-math.ll   126
1 file changed, 63 insertions(+), 63 deletions(-)
diff --git a/test/CodeGen/X86/vector-trunc-math.ll b/test/CodeGen/X86/vector-trunc-math.ll
index 45479941143..7d26cec9928 100644
--- a/test/CodeGen/X86/vector-trunc-math.ll
+++ b/test/CodeGen/X86/vector-trunc-math.ll
@@ -33,7 +33,7 @@ define <4 x i32> @trunc_add_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -41,7 +41,7 @@ define <4 x i32> @trunc_add_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = add <4 x i64> %a0, %a1
@@ -101,7 +101,7 @@ define <8 x i16> @trunc_add_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -146,7 +146,7 @@ define <8 x i16> @trunc_add_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -154,7 +154,7 @@ define <8 x i16> @trunc_add_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = add <8 x i32> %a0, %a1
@@ -383,7 +383,7 @@ define <16 x i8> @trunc_add_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -435,7 +435,7 @@ define <8 x i16> @trunc_add_v8i32_v8i16_sext_8i8(<16 x i8> %a0, <8 x i32> %a1) {
;
; AVX512-LABEL: trunc_add_v8i32_v8i16_sext_8i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
; AVX512-NEXT: vpmovdw %zmm1, %ymm1
; AVX512-NEXT: vpmovsxbw %xmm0, %xmm0
; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
@@ -477,7 +477,7 @@ define <4 x i32> @trunc_add_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
;
; AVX512-LABEL: trunc_add_const_v4i64_v4i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -576,7 +576,7 @@ define <8 x i16> @trunc_add_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
;
; AVX512-LABEL: trunc_add_const_v8i32_v8i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -771,7 +771,7 @@ define <16 x i8> @trunc_add_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
;
; AVX512BW-LABEL: trunc_add_const_v16i16_v16i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
@@ -816,7 +816,7 @@ define <4 x i32> @trunc_sub_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -824,7 +824,7 @@ define <4 x i32> @trunc_sub_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <4 x i64> %a0, %a1
@@ -884,7 +884,7 @@ define <8 x i16> @trunc_sub_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -929,7 +929,7 @@ define <8 x i16> @trunc_sub_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -937,7 +937,7 @@ define <8 x i16> @trunc_sub_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <8 x i32> %a0, %a1
@@ -1166,7 +1166,7 @@ define <16 x i8> @trunc_sub_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1214,7 +1214,7 @@ define <4 x i32> @trunc_sub_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX2-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1222,7 +1222,7 @@ define <4 x i32> @trunc_sub_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <4 x i64> %a0, <i64 0, i64 1, i64 2, i64 3>
@@ -1287,7 +1287,7 @@ define <8 x i16> @trunc_sub_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1331,7 +1331,7 @@ define <8 x i16> @trunc_sub_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; AVX2-NEXT: vpsubd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1339,7 +1339,7 @@ define <8 x i16> @trunc_sub_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpsubd {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <8 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -1567,7 +1567,7 @@ define <16 x i8> @trunc_sub_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1635,8 +1635,8 @@ define <4 x i32> @trunc_mul_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
;
; AVX512F-LABEL: trunc_mul_v4i64_v4i32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512F-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
; AVX512F-NEXT: vpmulld %xmm1, %xmm0, %xmm0
@@ -1645,8 +1645,8 @@ define <4 x i32> @trunc_mul_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
;
; AVX512BW-LABEL: trunc_mul_v4i64_v4i32:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
; AVX512BW-NEXT: vpmulld %xmm1, %xmm0, %xmm0
@@ -1655,11 +1655,11 @@ define <4 x i32> @trunc_mul_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
;
; AVX512DQ-LABEL: trunc_mul_v4i64_v4i32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = mul <4 x i64> %a0, %a1
@@ -1810,7 +1810,7 @@ define <8 x i16> @trunc_mul_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1818,7 +1818,7 @@ define <8 x i16> @trunc_mul_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = mul <8 x i32> %a0, %a1
@@ -2241,7 +2241,7 @@ define <16 x i8> @trunc_mul_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -2293,7 +2293,7 @@ define <8 x i16> @trunc_mul_v8i32_v8i16_zext_8i8(<16 x i8> %a0, <8 x i32> %a1) {
;
; AVX512-LABEL: trunc_mul_v8i32_v8i16_zext_8i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
; AVX512-NEXT: vpmovdw %zmm1, %ymm1
; AVX512-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
@@ -2350,7 +2350,7 @@ define <4 x i32> @trunc_mul_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
;
; AVX512-LABEL: trunc_mul_const_v4i64_v4i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -2449,7 +2449,7 @@ define <8 x i16> @trunc_mul_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
;
; AVX512-LABEL: trunc_mul_const_v8i32_v8i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -2793,7 +2793,7 @@ define <16 x i8> @trunc_mul_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -2834,7 +2834,7 @@ define <4 x i32> @trunc_and_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX2-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -2842,7 +2842,7 @@ define <4 x i32> @trunc_and_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = and <4 x i64> %a0, %a1
@@ -2898,7 +2898,7 @@ define <8 x i16> @trunc_and_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -2941,7 +2941,7 @@ define <8 x i16> @trunc_and_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -2949,7 +2949,7 @@ define <8 x i16> @trunc_and_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = and <8 x i32> %a0, %a1
@@ -3164,7 +3164,7 @@ define <16 x i8> @trunc_and_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -3209,7 +3209,7 @@ define <4 x i32> @trunc_and_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
;
; AVX512-LABEL: trunc_and_const_v4i64_v4i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -3308,7 +3308,7 @@ define <8 x i16> @trunc_and_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
;
; AVX512-LABEL: trunc_and_const_v8i32_v8i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -3503,7 +3503,7 @@ define <16 x i8> @trunc_and_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
;
; AVX512BW-LABEL: trunc_and_const_v16i16_v16i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
@@ -3546,7 +3546,7 @@ define <4 x i32> @trunc_xor_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX2-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -3554,7 +3554,7 @@ define <4 x i32> @trunc_xor_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = xor <4 x i64> %a0, %a1
@@ -3610,7 +3610,7 @@ define <8 x i16> @trunc_xor_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -3653,7 +3653,7 @@ define <8 x i16> @trunc_xor_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -3661,7 +3661,7 @@ define <8 x i16> @trunc_xor_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = xor <8 x i32> %a0, %a1
@@ -3876,7 +3876,7 @@ define <16 x i8> @trunc_xor_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -3921,7 +3921,7 @@ define <4 x i32> @trunc_xor_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
;
; AVX512-LABEL: trunc_xor_const_v4i64_v4i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -4020,7 +4020,7 @@ define <8 x i16> @trunc_xor_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
;
; AVX512-LABEL: trunc_xor_const_v8i32_v8i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -4215,7 +4215,7 @@ define <16 x i8> @trunc_xor_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
;
; AVX512BW-LABEL: trunc_xor_const_v16i16_v16i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
@@ -4258,7 +4258,7 @@ define <4 x i32> @trunc_or_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX2-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -4266,7 +4266,7 @@ define <4 x i32> @trunc_or_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = or <4 x i64> %a0, %a1
@@ -4322,7 +4322,7 @@ define <8 x i16> @trunc_or_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -4365,7 +4365,7 @@ define <8 x i16> @trunc_or_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -4373,7 +4373,7 @@ define <8 x i16> @trunc_or_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = or <8 x i32> %a0, %a1
@@ -4588,7 +4588,7 @@ define <16 x i8> @trunc_or_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -4633,7 +4633,7 @@ define <4 x i32> @trunc_or_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
;
; AVX512-LABEL: trunc_or_const_v4i64_v4i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -4732,7 +4732,7 @@ define <8 x i16> @trunc_or_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
;
; AVX512-LABEL: trunc_or_const_v8i32_v8i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -4927,7 +4927,7 @@ define <16 x i8> @trunc_or_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
;
; AVX512BW-LABEL: trunc_or_const_v16i16_v16i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper