author    | Simon Pilgrim <llvm-dev@redking.me.uk> | 2017-06-23 14:38:00 +0000
committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2017-06-23 14:38:00 +0000
commit    | 47b190de86f90f07af1fedf525ffba0154f122ec (patch)
tree      | 927e645ac3278a0214d5fe92e43f66390899cfe1 /test
parent    | a550fda1e69c6f45ef4f5af4f3387a91716db072 (diff)
[X86][AVX] Extended vector average tests
Added AVX1 tests and merged AVX1/AVX2/AVX512 checks where possible
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@306107 91177308-0d34-0410-b5e6-96231b3b80d8
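For context on the merged checks: update_llc_test_checks.py emits a single assertion block under a prefix that several RUN lines share whenever those runs produce identical code, and falls back to the per-target prefix where they diverge. A minimal illustrative sketch of the scheme (hypothetical function @foo, not part of this commit):

; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
;
; One shared block suffices when both targets generate the same code:
; AVX-LABEL: foo:
; AVX:       vpavgb %xmm1, %xmm0, %xmm0
; AVX-NEXT:  retq
;
; Separate AVX1-LABEL:/AVX2-LABEL: blocks are emitted instead where the
; targets diverge, as in the 256-bit tests (e.g. avg_v32i8) below.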
Diffstat (limited to 'test')
-rw-r--r-- | test/CodeGen/X86/avg.ll | 1328
1 file changed, 917 insertions, 411 deletions
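Every test in the diff below exercises the same averaging idiom, avg(a, b) = (a + b + 1) >> 1 computed in a widened type, which the X86 backend recognizes and lowers to a single pavgb/pavgw (vpavgb/vpavgw under AVX). A condensed sketch of the pattern, reconstructed from the first hunk (the store destination and exact operand order are illustrative, following the file's common shape):

define void @avg_v4i8(<4 x i8>* %a, <4 x i8>* %b) {
  %1 = load <4 x i8>, <4 x i8>* %a
  %2 = load <4 x i8>, <4 x i8>* %b
  %3 = zext <4 x i8> %1 to <4 x i32>                          ; widen so a+b+1 cannot wrap
  %4 = zext <4 x i8> %2 to <4 x i32>
  %5 = add nuw nsw <4 x i32> %4, <i32 1, i32 1, i32 1, i32 1> ; rounding bias
  %6 = add nuw nsw <4 x i32> %5, %3                           ; a + b + 1
  %7 = lshr <4 x i32> %6, <i32 1, i32 1, i32 1, i32 1>        ; halve
  %8 = trunc <4 x i32> %7 to <4 x i8>                         ; narrow back to i8
  store <4 x i8> %8, <4 x i8>* undef, align 4                 ; illustrative sink
  ret void
}

The merged AVX blocks show all AVX levels folding this to one vpavgb for 128-bit vectors; the new AVX1-only blocks for 256-bit types capture the current fallback expansion (vpmovzxbd/vpaddd/vpsrld/vpackuswb), since plain AVX lacks 256-bit integer vpavgb.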
diff --git a/test/CodeGen/X86/avg.ll b/test/CodeGen/X86/avg.ll
index 634007167f0..d4712becd09 100644
--- a/test/CodeGen/X86/avg.ll
+++ b/test/CodeGen/X86/avg.ll
@@ -1,8 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512F
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512BW
 
 define void @avg_v4i8(<4 x i8>* %a, <4 x i8>* %b) {
 ; SSE2-LABEL: avg_v4i8:
@@ -13,29 +14,13 @@ define void @avg_v4i8(<4 x i8>* %a, <4 x i8>* %b) {
 ; SSE2-NEXT: movd %xmm1, (%rax)
 ; SSE2-NEXT: retq
 ;
-; AVX2-LABEL: avg_v4i8:
-; AVX2: # BB#0:
-; AVX2-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX2-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX2-NEXT: vpavgb %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vmovd %xmm0, (%rax)
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: avg_v4i8:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX512F-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX512F-NEXT: vpavgb %xmm0, %xmm1, %xmm0
-; AVX512F-NEXT: vmovd %xmm0, (%rax)
-; AVX512F-NEXT: retq
-;
-; AVX512BW-LABEL: avg_v4i8:
-; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX512BW-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX512BW-NEXT: vpavgb %xmm0, %xmm1, %xmm0
-; AVX512BW-NEXT: vmovd %xmm0, (%rax)
-; AVX512BW-NEXT: retq
+; AVX-LABEL: avg_v4i8:
+; AVX: # BB#0:
+; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vpavgb %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vmovd %xmm0, (%rax)
+; AVX-NEXT: retq
 %1 = load <4 x i8>, <4 x i8>* %a
 %2 = load <4 x i8>, <4 x i8>* %b
 %3 = zext <4 x i8> %1 to <4 x i32>
@@ -57,29 +42,13 @@ define void @avg_v8i8(<8 x i8>* %a, <8 x i8>* %b) {
 ; SSE2-NEXT: movq %xmm1, (%rax)
 ; SSE2-NEXT: retq
 ;
-; AVX2-LABEL: avg_v8i8:
-; AVX2: # BB#0:
-; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX2-NEXT: vpavgb %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vmovq %xmm0, (%rax)
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: avg_v8i8:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX512F-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX512F-NEXT: vpavgb %xmm0, %xmm1, %xmm0
-; AVX512F-NEXT: vmovq %xmm0, (%rax)
-; AVX512F-NEXT: retq
-;
-; AVX512BW-LABEL: avg_v8i8:
-; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX512BW-NEXT: vpavgb %xmm0, %xmm1, %xmm0
-; AVX512BW-NEXT: vmovq %xmm0, (%rax)
-; AVX512BW-NEXT: retq
+; AVX-LABEL: avg_v8i8:
+; AVX: # BB#0:
+; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+;
AVX-NEXT: vpavgb %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vmovq %xmm0, (%rax) +; AVX-NEXT: retq %1 = load <8 x i8>, <8 x i8>* %a %2 = load <8 x i8>, <8 x i8>* %b %3 = zext <8 x i8> %1 to <8 x i32> @@ -100,26 +69,12 @@ define void @avg_v16i8(<16 x i8>* %a, <16 x i8>* %b) { ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq ; -; AVX2-LABEL: avg_v16i8: -; AVX2: # BB#0: -; AVX2-NEXT: vmovdqa (%rsi), %xmm0 -; AVX2-NEXT: vpavgb (%rdi), %xmm0, %xmm0 -; AVX2-NEXT: vmovdqu %xmm0, (%rax) -; AVX2-NEXT: retq -; -; AVX512F-LABEL: avg_v16i8: -; AVX512F: # BB#0: -; AVX512F-NEXT: vmovdqa (%rsi), %xmm0 -; AVX512F-NEXT: vpavgb (%rdi), %xmm0, %xmm0 -; AVX512F-NEXT: vmovdqu %xmm0, (%rax) -; AVX512F-NEXT: retq -; -; AVX512BW-LABEL: avg_v16i8: -; AVX512BW: # BB#0: -; AVX512BW-NEXT: vmovdqa (%rsi), %xmm0 -; AVX512BW-NEXT: vpavgb (%rdi), %xmm0, %xmm0 -; AVX512BW-NEXT: vmovdqu %xmm0, (%rax) -; AVX512BW-NEXT: retq +; AVX-LABEL: avg_v16i8: +; AVX: # BB#0: +; AVX-NEXT: vmovdqa (%rsi), %xmm0 +; AVX-NEXT: vpavgb (%rdi), %xmm0, %xmm0 +; AVX-NEXT: vmovdqu %xmm0, (%rax) +; AVX-NEXT: retq %1 = load <16 x i8>, <16 x i8>* %a %2 = load <16 x i8>, <16 x i8>* %b %3 = zext <16 x i8> %1 to <16 x i32> @@ -221,6 +176,69 @@ define void @avg_v32i8(<32 x i8>* %a, <32 x i8>* %b) { ; SSE2-NEXT: movdqu %xmm4, (%rax) ; SSE2-NEXT: retq ; +; AVX1-LABEL: avg_v32i8: +; AVX1: # BB#0: +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [1,1,1,1] +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm0, %xmm0 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm1, %xmm1 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm2, %xmm9 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm3, %xmm3 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm4, %xmm4 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm5, %xmm5 +; 
AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm8, %xmm7 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm6, %xmm2, %xmm2 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm2, %xmm6, %xmm2 +; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1 +; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] +; AVX1-NEXT: vpand %xmm6, %xmm0, %xmm0 +; AVX1-NEXT: vpand %xmm6, %xmm1, %xmm1 +; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: vpsrld $1, %xmm3, %xmm1 +; AVX1-NEXT: vpsrld $1, %xmm9, %xmm3 +; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3 +; AVX1-NEXT: vpand %xmm6, %xmm1, %xmm1 +; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: vpsrld $1, %xmm5, %xmm1 +; AVX1-NEXT: vpsrld $1, %xmm4, %xmm3 +; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3 +; AVX1-NEXT: vpand %xmm6, %xmm1, %xmm1 +; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2 +; AVX1-NEXT: vpsrld $1, %xmm7, %xmm3 +; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3 +; AVX1-NEXT: vpand %xmm6, %xmm2, %xmm2 +; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: vmovups %ymm0, (%rax) +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; ; AVX2-LABEL: avg_v32i8: ; AVX2: # BB#0: ; AVX2-NEXT: vmovdqa (%rsi), %ymm0 @@ -229,21 +247,13 @@ define void @avg_v32i8(<32 x i8>* %a, <32 x i8>* %b) { ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; -; AVX512F-LABEL: avg_v32i8: -; AVX512F: # BB#0: -; AVX512F-NEXT: vmovdqa (%rsi), %ymm0 -; AVX512F-NEXT: vpavgb (%rdi), %ymm0, %ymm0 -; AVX512F-NEXT: vmovdqu %ymm0, (%rax) -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512BW-LABEL: avg_v32i8: -; AVX512BW: # BB#0: -; AVX512BW-NEXT: vmovdqa (%rsi), %ymm0 -; AVX512BW-NEXT: vpavgb (%rdi), %ymm0, %ymm0 -; AVX512BW-NEXT: vmovdqu %ymm0, (%rax) -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq +; AVX512-LABEL: avg_v32i8: +; AVX512: # BB#0: +; AVX512-NEXT: vmovdqa (%rsi), %ymm0 +; AVX512-NEXT: vpavgb (%rdi), %ymm0, %ymm0 +; AVX512-NEXT: vmovdqu %ymm0, (%rax) +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq %1 = load <32 x i8>, <32 x i8>* %a %2 = load <32 x i8>, <32 x i8>* %b %3 = zext <32 x i8> %1 to <32 x i32> @@ -454,6 +464,131 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) { ; SSE2-NEXT: addq $152, %rsp ; SSE2-NEXT: retq ; +; AVX1-LABEL: avg_v64i8: +; AVX1: # BB#0: +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm10 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm11 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm3 = 
mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm12 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm13 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = [1,1,1,1] +; AVX1-NEXT: vpaddd %xmm0, %xmm4, %xmm4 +; AVX1-NEXT: vpaddd %xmm4, %xmm5, %xmm14 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm0, %xmm4, %xmm4 +; AVX1-NEXT: vpaddd %xmm4, %xmm7, %xmm7 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm0, %xmm4, %xmm4 +; AVX1-NEXT: vpaddd %xmm4, %xmm10, %xmm4 +; AVX1-NEXT: vmovdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm0, %xmm4, %xmm4 +; AVX1-NEXT: vpaddd %xmm4, %xmm2, %xmm9 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm0, %xmm4, %xmm4 +; AVX1-NEXT: vpaddd %xmm4, %xmm11, %xmm11 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm0, %xmm4, %xmm4 +; AVX1-NEXT: vpaddd %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm0, %xmm4, %xmm4 +; AVX1-NEXT: vpaddd %xmm4, %xmm15, %xmm15 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm0, %xmm4, %xmm4 +; AVX1-NEXT: vpaddd %xmm4, %xmm6, %xmm6 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm0, %xmm4, %xmm4 +; AVX1-NEXT: vpaddd %xmm4, %xmm8, %xmm2 +; AVX1-NEXT: vmovdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm0, %xmm4, %xmm4 +; AVX1-NEXT: vpaddd %xmm4, %xmm1, %xmm8 +; AVX1-NEXT: vpmovzxbd {{.*#+}} 
xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm1 +; AVX1-NEXT: vpaddd %xmm1, %xmm12, %xmm12 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm1 +; AVX1-NEXT: vpaddd %xmm1, %xmm13, %xmm4 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm1 +; AVX1-NEXT: vpaddd -{{[0-9]+}}(%rsp), %xmm1, %xmm13 # 16-byte Folded Reload +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm1 +; AVX1-NEXT: vpaddd -{{[0-9]+}}(%rsp), %xmm1, %xmm5 # 16-byte Folded Reload +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm1 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm10 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm1 +; AVX1-NEXT: vpsrld $1, %xmm7, %xmm0 +; AVX1-NEXT: vpsrld $1, %xmm14, %xmm14 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] +; AVX1-NEXT: vpand %xmm7, %xmm14, %xmm14 +; AVX1-NEXT: vpand %xmm7, %xmm0, %xmm0 +; AVX1-NEXT: vpackuswb %xmm14, %xmm0, %xmm14 +; AVX1-NEXT: vpsrld $1, %xmm9, %xmm0 +; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload +; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2 +; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2 +; AVX1-NEXT: vpand %xmm7, %xmm0, %xmm0 +; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpackuswb %xmm14, %xmm0, %xmm0 +; AVX1-NEXT: vpsrld $1, %xmm3, %xmm2 +; AVX1-NEXT: vpsrld $1, %xmm11, %xmm3 +; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3 +; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2 +; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpsrld $1, %xmm6, %xmm3 +; AVX1-NEXT: vpsrld $1, %xmm15, %xmm6 +; AVX1-NEXT: vpand %xmm7, %xmm6, %xmm6 +; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3 +; AVX1-NEXT: vpackuswb %xmm6, %xmm3, %xmm3 +; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 +; AVX1-NEXT: vpsrld $1, %xmm8, %xmm2 +; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload +; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3 +; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3 +; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2 +; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpsrld $1, %xmm4, %xmm3 +; AVX1-NEXT: vpsrld $1, %xmm12, %xmm4 +; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm4 +; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3 +; AVX1-NEXT: vpackuswb %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpsrld $1, %xmm5, %xmm3 +; AVX1-NEXT: vpsrld $1, %xmm13, %xmm4 +; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm4 +; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3 +; AVX1-NEXT: vpackuswb %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1 +; AVX1-NEXT: vpsrld $1, %xmm10, %xmm4 +; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm4 +; AVX1-NEXT: vpand %xmm7, %xmm1, %xmm1 +; AVX1-NEXT: vpackuswb %xmm4, 
%xmm1, %xmm1 +; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 +; AVX1-NEXT: vmovups %ymm1, (%rax) +; AVX1-NEXT: vmovups %ymm0, (%rax) +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; ; AVX2-LABEL: avg_v64i8: ; AVX2: # BB#0: ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero @@ -596,29 +731,13 @@ define void @avg_v4i16(<4 x i16>* %a, <4 x i16>* %b) { ; SSE2-NEXT: movq %xmm1, (%rax) ; SSE2-NEXT: retq ; -; AVX2-LABEL: avg_v4i16: -; AVX2: # BB#0: -; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero -; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero -; AVX2-NEXT: vpavgw %xmm0, %xmm1, %xmm0 -; AVX2-NEXT: vmovq %xmm0, (%rax) -; AVX2-NEXT: retq -; -; AVX512F-LABEL: avg_v4i16: -; AVX512F: # BB#0: -; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero -; AVX512F-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero -; AVX512F-NEXT: vpavgw %xmm0, %xmm1, %xmm0 -; AVX512F-NEXT: vmovq %xmm0, (%rax) -; AVX512F-NEXT: retq -; -; AVX512BW-LABEL: avg_v4i16: -; AVX512BW: # BB#0: -; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero -; AVX512BW-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero -; AVX512BW-NEXT: vpavgw %xmm0, %xmm1, %xmm0 -; AVX512BW-NEXT: vmovq %xmm0, (%rax) -; AVX512BW-NEXT: retq +; AVX-LABEL: avg_v4i16: +; AVX: # BB#0: +; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVX-NEXT: vpavgw %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vmovq %xmm0, (%rax) +; AVX-NEXT: retq %1 = load <4 x i16>, <4 x i16>* %a %2 = load <4 x i16>, <4 x i16>* %b %3 = zext <4 x i16> %1 to <4 x i32> @@ -639,26 +758,12 @@ define void @avg_v8i16(<8 x i16>* %a, <8 x i16>* %b) { ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq ; -; AVX2-LABEL: avg_v8i16: -; AVX2: # BB#0: -; AVX2-NEXT: vmovdqa (%rsi), %xmm0 -; AVX2-NEXT: vpavgw (%rdi), %xmm0, %xmm0 -; AVX2-NEXT: vmovdqu %xmm0, (%rax) -; AVX2-NEXT: retq -; -; AVX512F-LABEL: avg_v8i16: -; AVX512F: # BB#0: -; AVX512F-NEXT: vmovdqa (%rsi), %xmm0 -; AVX512F-NEXT: vpavgw (%rdi), %xmm0, %xmm0 -; AVX512F-NEXT: vmovdqu %xmm0, (%rax) -; AVX512F-NEXT: retq -; -; AVX512BW-LABEL: avg_v8i16: -; AVX512BW: # BB#0: -; AVX512BW-NEXT: vmovdqa (%rsi), %xmm0 -; AVX512BW-NEXT: vpavgw (%rdi), %xmm0, %xmm0 -; AVX512BW-NEXT: vmovdqu %xmm0, (%rax) -; AVX512BW-NEXT: retq +; AVX-LABEL: avg_v8i16: +; AVX: # BB#0: +; AVX-NEXT: vmovdqa (%rsi), %xmm0 +; AVX-NEXT: vpavgw (%rdi), %xmm0, %xmm0 +; AVX-NEXT: vmovdqu %xmm0, (%rax) +; AVX-NEXT: retq %1 = load <8 x i16>, <8 x i16>* %a %2 = load <8 x i16>, <8 x i16>* %b %3 = zext <8 x i16> %1 to <8 x i32> @@ -718,6 +823,41 @@ define void @avg_v16i16(<16 x i16>* %a, <16 x i16>* %b) { ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq ; +; AVX1-LABEL: avg_v16i16: +; AVX1: # BB#0: +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm8 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = 
mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1] +; AVX1-NEXT: vpaddd %xmm3, %xmm4, %xmm4 +; AVX1-NEXT: vpaddd %xmm4, %xmm0, %xmm0 +; AVX1-NEXT: vpaddd %xmm3, %xmm5, %xmm4 +; AVX1-NEXT: vpaddd %xmm4, %xmm1, %xmm1 +; AVX1-NEXT: vpaddd %xmm3, %xmm6, %xmm4 +; AVX1-NEXT: vpaddd %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpaddd %xmm3, %xmm7, %xmm3 +; AVX1-NEXT: vpaddd %xmm3, %xmm8, %xmm3 +; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3 +; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2 +; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1 +; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0 +; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7] +; AVX1-NEXT: vpackusdw %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm4[1],xmm2[2],xmm4[3],xmm2[4],xmm4[5],xmm2[6],xmm4[7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0],xmm4[1],xmm3[2],xmm4[3],xmm3[4],xmm4[5],xmm3[6],xmm4[7] +; AVX1-NEXT: vpackusdw %xmm1, %xmm2, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: vmovups %ymm0, (%rax) +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; ; AVX2-LABEL: avg_v16i16: ; AVX2: # BB#0: ; AVX2-NEXT: vmovdqa (%rsi), %ymm0 @@ -726,21 +866,13 @@ define void @avg_v16i16(<16 x i16>* %a, <16 x i16>* %b) { ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; -; AVX512F-LABEL: avg_v16i16: -; AVX512F: # BB#0: -; AVX512F-NEXT: vmovdqa (%rsi), %ymm0 -; AVX512F-NEXT: vpavgw (%rdi), %ymm0, %ymm0 -; AVX512F-NEXT: vmovdqu %ymm0, (%rax) -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512BW-LABEL: avg_v16i16: -; AVX512BW: # BB#0: -; AVX512BW-NEXT: vmovdqa (%rsi), %ymm0 -; AVX512BW-NEXT: vpavgw (%rdi), %ymm0, %ymm0 -; AVX512BW-NEXT: vmovdqu %ymm0, (%rax) -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq +; AVX512-LABEL: avg_v16i16: +; AVX512: # BB#0: +; AVX512-NEXT: vmovdqa (%rsi), %ymm0 +; AVX512-NEXT: vpavgw (%rdi), %ymm0, %ymm0 +; AVX512-NEXT: vmovdqu %ymm0, (%rax) +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq %1 = load <16 x i16>, <16 x i16>* %a %2 = load <16 x i16>, <16 x i16>* %b %3 = zext <16 x i16> %1 to <16 x i32> @@ -841,6 +973,69 @@ define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) { ; SSE2-NEXT: movdqu %xmm8, (%rax) ; SSE2-NEXT: retq ; +; AVX1-LABEL: avg_v32i16: +; AVX1: # BB#0: +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm8 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [1,1,1,1] +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm0, %xmm0 +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm1, %xmm1 +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; 
AVX1-NEXT: vpaddd %xmm7, %xmm2, %xmm2 +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm3, %xmm3 +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm4, %xmm9 +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm5, %xmm5 +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm8, %xmm7 +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpaddd %xmm6, %xmm4, %xmm4 +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpaddd %xmm4, %xmm6, %xmm4 +; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1 +; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0 +; AVX1-NEXT: vpxor %xmm6, %xmm6, %xmm6 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm6[1],xmm0[2],xmm6[3],xmm0[4],xmm6[5],xmm0[6],xmm6[7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2],xmm6[3],xmm1[4],xmm6[5],xmm1[6],xmm6[7] +; AVX1-NEXT: vpackusdw %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: vpsrld $1, %xmm3, %xmm1 +; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm6[1],xmm2[2],xmm6[3],xmm2[4],xmm6[5],xmm2[6],xmm6[7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2],xmm6[3],xmm1[4],xmm6[5],xmm1[6],xmm6[7] +; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: vpsrld $1, %xmm5, %xmm1 +; AVX1-NEXT: vpsrld $1, %xmm9, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm6[1],xmm2[2],xmm6[3],xmm2[4],xmm6[5],xmm2[6],xmm6[7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2],xmm6[3],xmm1[4],xmm6[5],xmm1[6],xmm6[7] +; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vpsrld $1, %xmm4, %xmm2 +; AVX1-NEXT: vpsrld $1, %xmm7, %xmm3 +; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm6[1],xmm3[2],xmm6[3],xmm3[4],xmm6[5],xmm3[6],xmm6[7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm6[1],xmm2[2],xmm6[3],xmm2[4],xmm6[5],xmm2[6],xmm6[7] +; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 +; AVX1-NEXT: vmovups %ymm1, (%rax) +; AVX1-NEXT: vmovups %ymm0, (%rax) +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; ; AVX2-LABEL: avg_v32i16: ; AVX2: # BB#0: ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero @@ -926,29 +1121,13 @@ define void @avg_v4i8_2(<4 x i8>* %a, <4 x i8>* %b) { ; SSE2-NEXT: movd %xmm1, (%rax) ; SSE2-NEXT: retq ; -; AVX2-LABEL: avg_v4i8_2: -; AVX2: # BB#0: -; AVX2-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero -; AVX2-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero -; AVX2-NEXT: vpavgb %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vmovd %xmm0, (%rax) -; AVX2-NEXT: retq -; -; AVX512F-LABEL: avg_v4i8_2: -; AVX512F: # BB#0: -; AVX512F-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero -; AVX512F-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero -; AVX512F-NEXT: vpavgb %xmm1, %xmm0, %xmm0 -; AVX512F-NEXT: vmovd %xmm0, (%rax) -; AVX512F-NEXT: retq -; -; AVX512BW-LABEL: avg_v4i8_2: -; AVX512BW: # BB#0: -; AVX512BW-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero -; AVX512BW-NEXT: vmovd {{.*#+}} xmm1 = 
mem[0],zero,zero,zero -; AVX512BW-NEXT: vpavgb %xmm1, %xmm0, %xmm0 -; AVX512BW-NEXT: vmovd %xmm0, (%rax) -; AVX512BW-NEXT: retq +; AVX-LABEL: avg_v4i8_2: +; AVX: # BB#0: +; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vmovd %xmm0, (%rax) +; AVX-NEXT: retq %1 = load <4 x i8>, <4 x i8>* %a %2 = load <4 x i8>, <4 x i8>* %b %3 = zext <4 x i8> %1 to <4 x i32> @@ -970,29 +1149,13 @@ define void @avg_v8i8_2(<8 x i8>* %a, <8 x i8>* %b) { ; SSE2-NEXT: movq %xmm1, (%rax) ; SSE2-NEXT: retq ; -; AVX2-LABEL: avg_v8i8_2: -; AVX2: # BB#0: -; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero -; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero -; AVX2-NEXT: vpavgb %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vmovq %xmm0, (%rax) -; AVX2-NEXT: retq -; -; AVX512F-LABEL: avg_v8i8_2: -; AVX512F: # BB#0: -; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero -; AVX512F-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero -; AVX512F-NEXT: vpavgb %xmm1, %xmm0, %xmm0 -; AVX512F-NEXT: vmovq %xmm0, (%rax) -; AVX512F-NEXT: retq -; -; AVX512BW-LABEL: avg_v8i8_2: -; AVX512BW: # BB#0: -; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero -; AVX512BW-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero -; AVX512BW-NEXT: vpavgb %xmm1, %xmm0, %xmm0 -; AVX512BW-NEXT: vmovq %xmm0, (%rax) -; AVX512BW-NEXT: retq +; AVX-LABEL: avg_v8i8_2: +; AVX: # BB#0: +; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vmovq %xmm0, (%rax) +; AVX-NEXT: retq %1 = load <8 x i8>, <8 x i8>* %a %2 = load <8 x i8>, <8 x i8>* %b %3 = zext <8 x i8> %1 to <8 x i32> @@ -1013,26 +1176,12 @@ define void @avg_v16i8_2(<16 x i8>* %a, <16 x i8>* %b) { ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq ; -; AVX2-LABEL: avg_v16i8_2: -; AVX2: # BB#0: -; AVX2-NEXT: vmovdqa (%rdi), %xmm0 -; AVX2-NEXT: vpavgb (%rsi), %xmm0, %xmm0 -; AVX2-NEXT: vmovdqu %xmm0, (%rax) -; AVX2-NEXT: retq -; -; AVX512F-LABEL: avg_v16i8_2: -; AVX512F: # BB#0: -; AVX512F-NEXT: vmovdqa (%rdi), %xmm0 -; AVX512F-NEXT: vpavgb (%rsi), %xmm0, %xmm0 -; AVX512F-NEXT: vmovdqu %xmm0, (%rax) -; AVX512F-NEXT: retq -; -; AVX512BW-LABEL: avg_v16i8_2: -; AVX512BW: # BB#0: -; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0 -; AVX512BW-NEXT: vpavgb (%rsi), %xmm0, %xmm0 -; AVX512BW-NEXT: vmovdqu %xmm0, (%rax) -; AVX512BW-NEXT: retq +; AVX-LABEL: avg_v16i8_2: +; AVX: # BB#0: +; AVX-NEXT: vmovdqa (%rdi), %xmm0 +; AVX-NEXT: vpavgb (%rsi), %xmm0, %xmm0 +; AVX-NEXT: vmovdqu %xmm0, (%rax) +; AVX-NEXT: retq %1 = load <16 x i8>, <16 x i8>* %a %2 = load <16 x i8>, <16 x i8>* %b %3 = zext <16 x i8> %1 to <16 x i32> @@ -1134,6 +1283,69 @@ define void @avg_v32i8_2(<32 x i8>* %a, <32 x i8>* %b) { ; SSE2-NEXT: movdqu %xmm4, (%rax) ; SSE2-NEXT: retq ; +; AVX1-LABEL: avg_v32i8_2: +; AVX1: # BB#0: +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm5 = 
mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [1,1,1,1] +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm0, %xmm0 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm1, %xmm1 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm2, %xmm9 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm3, %xmm3 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm4, %xmm4 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm5, %xmm5 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm8, %xmm7 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm6, %xmm2, %xmm2 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm2, %xmm6, %xmm2 +; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1 +; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] +; AVX1-NEXT: vpand %xmm6, %xmm0, %xmm0 +; AVX1-NEXT: vpand %xmm6, %xmm1, %xmm1 +; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: vpsrld $1, %xmm3, %xmm1 +; AVX1-NEXT: vpsrld $1, %xmm9, %xmm3 +; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3 +; AVX1-NEXT: vpand %xmm6, %xmm1, %xmm1 +; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: vpsrld $1, %xmm5, %xmm1 +; AVX1-NEXT: vpsrld $1, %xmm4, %xmm3 +; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3 +; AVX1-NEXT: vpand %xmm6, %xmm1, %xmm1 +; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2 +; AVX1-NEXT: vpsrld $1, %xmm7, %xmm3 +; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3 +; AVX1-NEXT: vpand %xmm6, %xmm2, %xmm2 +; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: vmovups %ymm0, (%rax) +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; ; AVX2-LABEL: avg_v32i8_2: ; AVX2: # BB#0: ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 @@ -1142,21 +1354,13 @@ define void @avg_v32i8_2(<32 x i8>* %a, <32 x i8>* %b) { ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; -; AVX512F-LABEL: avg_v32i8_2: -; AVX512F: # BB#0: -; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 -; AVX512F-NEXT: vpavgb (%rsi), %ymm0, %ymm0 -; AVX512F-NEXT: vmovdqu %ymm0, (%rax) -; 
AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512BW-LABEL: avg_v32i8_2: -; AVX512BW: # BB#0: -; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0 -; AVX512BW-NEXT: vpavgb (%rsi), %ymm0, %ymm0 -; AVX512BW-NEXT: vmovdqu %ymm0, (%rax) -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq +; AVX512-LABEL: avg_v32i8_2: +; AVX512: # BB#0: +; AVX512-NEXT: vmovdqa (%rdi), %ymm0 +; AVX512-NEXT: vpavgb (%rsi), %ymm0, %ymm0 +; AVX512-NEXT: vmovdqu %ymm0, (%rax) +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq %1 = load <32 x i8>, <32 x i8>* %a %2 = load <32 x i8>, <32 x i8>* %b %3 = zext <32 x i8> %1 to <32 x i32> @@ -1325,6 +1529,117 @@ define void @avg_v64i8_2(<64 x i8>* %a, <64 x i8>* %b) { ; SSE2-NEXT: movdqu %xmm10, (%rax) ; SSE2-NEXT: retq ; +; AVX1-LABEL: avg_v64i8_2: +; AVX1: # BB#0: +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm14 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm11 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm12 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm13 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm9 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm10 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [1,1,1,1] +; AVX1-NEXT: vpaddd %xmm6, %xmm0, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm0, %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill +; AVX1-NEXT: vpaddd %xmm6, %xmm1, %xmm0 +; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: vpaddd %xmm6, %xmm2, %xmm1 +; AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1 +; AVX1-NEXT: vmovdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill +; AVX1-NEXT: vpaddd %xmm6, %xmm3, %xmm1 +; AVX1-NEXT: vpaddd %xmm1, %xmm3, %xmm1 +; AVX1-NEXT: vpaddd %xmm6, %xmm4, %xmm2 +; AVX1-NEXT: vpaddd %xmm2, %xmm4, %xmm2 +; AVX1-NEXT: vmovdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill +; AVX1-NEXT: vpaddd %xmm6, %xmm5, %xmm2 +; AVX1-NEXT: vpaddd %xmm2, %xmm5, %xmm2 +; AVX1-NEXT: vpaddd %xmm6, %xmm14, %xmm3 +; AVX1-NEXT: vpaddd %xmm3, %xmm14, %xmm14 +; AVX1-NEXT: vpaddd %xmm6, %xmm8, %xmm3 +; AVX1-NEXT: vpaddd %xmm3, %xmm8, %xmm5 +; AVX1-NEXT: vpaddd %xmm6, %xmm11, %xmm3 +; AVX1-NEXT: vpaddd %xmm3, 
%xmm11, %xmm3 +; AVX1-NEXT: vmovdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill +; AVX1-NEXT: vpaddd %xmm6, %xmm12, %xmm3 +; AVX1-NEXT: vpaddd %xmm3, %xmm12, %xmm8 +; AVX1-NEXT: vpaddd %xmm6, %xmm15, %xmm3 +; AVX1-NEXT: vpaddd %xmm3, %xmm15, %xmm11 +; AVX1-NEXT: vpaddd %xmm6, %xmm13, %xmm3 +; AVX1-NEXT: vpaddd %xmm3, %xmm13, %xmm13 +; AVX1-NEXT: vpaddd %xmm6, %xmm9, %xmm4 +; AVX1-NEXT: vpaddd %xmm4, %xmm9, %xmm12 +; AVX1-NEXT: vpaddd %xmm6, %xmm10, %xmm4 +; AVX1-NEXT: vpaddd %xmm4, %xmm10, %xmm4 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm6, %xmm3, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm3, %xmm15 +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; AVX1-NEXT: vpaddd %xmm6, %xmm3, %xmm6 +; AVX1-NEXT: vpaddd %xmm6, %xmm3, %xmm6 +; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0 +; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload +; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] +; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3 +; AVX1-NEXT: vpand %xmm7, %xmm0, %xmm0 +; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1 +; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload +; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3 +; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3 +; AVX1-NEXT: vpand %xmm7, %xmm1, %xmm1 +; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: vpsrld $1, %xmm2, %xmm1 +; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload +; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2 +; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2 +; AVX1-NEXT: vpand %xmm7, %xmm1, %xmm1 +; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vpsrld $1, %xmm5, %xmm2 +; AVX1-NEXT: vpsrld $1, %xmm14, %xmm3 +; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3 +; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2 +; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: vpsrld $1, %xmm8, %xmm1 +; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload +; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2 +; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2 +; AVX1-NEXT: vpand %xmm7, %xmm1, %xmm1 +; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vpsrld $1, %xmm13, %xmm2 +; AVX1-NEXT: vpsrld $1, %xmm11, %xmm3 +; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3 +; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2 +; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1 +; AVX1-NEXT: vpsrld $1, %xmm4, %xmm2 +; AVX1-NEXT: vpsrld $1, %xmm12, %xmm3 +; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3 +; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2 +; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpsrld $1, %xmm6, %xmm3 +; AVX1-NEXT: vpsrld $1, %xmm15, %xmm4 +; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm4 +; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3 +; AVX1-NEXT: vpackuswb %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 +; AVX1-NEXT: vmovups %ymm1, (%rax) +; AVX1-NEXT: vmovups %ymm0, (%rax) +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; ; AVX2-LABEL: avg_v64i8_2: ; AVX2: # BB#0: ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero @@ -1456,29 +1771,13 @@ define void 
@avg_v4i16_2(<4 x i16>* %a, <4 x i16>* %b) { ; SSE2-NEXT: movq %xmm1, (%rax) ; SSE2-NEXT: retq ; -; AVX2-LABEL: avg_v4i16_2: -; AVX2: # BB#0: -; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero -; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero -; AVX2-NEXT: vpavgw %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vmovq %xmm0, (%rax) -; AVX2-NEXT: retq -; -; AVX512F-LABEL: avg_v4i16_2: -; AVX512F: # BB#0: -; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero -; AVX512F-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero -; AVX512F-NEXT: vpavgw %xmm1, %xmm0, %xmm0 -; AVX512F-NEXT: vmovq %xmm0, (%rax) -; AVX512F-NEXT: retq -; -; AVX512BW-LABEL: avg_v4i16_2: -; AVX512BW: # BB#0: -; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero -; AVX512BW-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero -; AVX512BW-NEXT: vpavgw %xmm1, %xmm0, %xmm0 -; AVX512BW-NEXT: vmovq %xmm0, (%rax) -; AVX512BW-NEXT: retq +; AVX-LABEL: avg_v4i16_2: +; AVX: # BB#0: +; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVX-NEXT: vpavgw %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vmovq %xmm0, (%rax) +; AVX-NEXT: retq %1 = load <4 x i16>, <4 x i16>* %a %2 = load <4 x i16>, <4 x i16>* %b %3 = zext <4 x i16> %1 to <4 x i32> @@ -1499,26 +1798,12 @@ define void @avg_v8i16_2(<8 x i16>* %a, <8 x i16>* %b) { ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq ; -; AVX2-LABEL: avg_v8i16_2: -; AVX2: # BB#0: -; AVX2-NEXT: vmovdqa (%rdi), %xmm0 -; AVX2-NEXT: vpavgw (%rsi), %xmm0, %xmm0 -; AVX2-NEXT: vmovdqu %xmm0, (%rax) -; AVX2-NEXT: retq -; -; AVX512F-LABEL: avg_v8i16_2: -; AVX512F: # BB#0: -; AVX512F-NEXT: vmovdqa (%rdi), %xmm0 -; AVX512F-NEXT: vpavgw (%rsi), %xmm0, %xmm0 -; AVX512F-NEXT: vmovdqu %xmm0, (%rax) -; AVX512F-NEXT: retq -; -; AVX512BW-LABEL: avg_v8i16_2: -; AVX512BW: # BB#0: -; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0 -; AVX512BW-NEXT: vpavgw (%rsi), %xmm0, %xmm0 -; AVX512BW-NEXT: vmovdqu %xmm0, (%rax) -; AVX512BW-NEXT: retq +; AVX-LABEL: avg_v8i16_2: +; AVX: # BB#0: +; AVX-NEXT: vmovdqa (%rdi), %xmm0 +; AVX-NEXT: vpavgw (%rsi), %xmm0, %xmm0 +; AVX-NEXT: vmovdqu %xmm0, (%rax) +; AVX-NEXT: retq %1 = load <8 x i16>, <8 x i16>* %a %2 = load <8 x i16>, <8 x i16>* %b %3 = zext <8 x i16> %1 to <8 x i32> @@ -1578,6 +1863,41 @@ define void @avg_v16i16_2(<16 x i16>* %a, <16 x i16>* %b) { ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq ; +; AVX1-LABEL: avg_v16i16_2: +; AVX1: # BB#0: +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm8 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1] +; AVX1-NEXT: vpaddd %xmm3, %xmm4, %xmm4 +; AVX1-NEXT: vpaddd %xmm4, %xmm0, %xmm0 +; AVX1-NEXT: vpaddd %xmm3, %xmm5, %xmm4 +; AVX1-NEXT: vpaddd %xmm4, %xmm1, %xmm1 +; AVX1-NEXT: vpaddd %xmm3, %xmm6, %xmm4 +; AVX1-NEXT: vpaddd %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpaddd %xmm3, %xmm7, %xmm3 +; AVX1-NEXT: vpaddd %xmm3, %xmm8, %xmm3 +; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3 +; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2 +; AVX1-NEXT: vpsrld $1, 
%xmm1, %xmm1 +; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0 +; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7] +; AVX1-NEXT: vpackusdw %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm4[1],xmm2[2],xmm4[3],xmm2[4],xmm4[5],xmm2[6],xmm4[7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0],xmm4[1],xmm3[2],xmm4[3],xmm3[4],xmm4[5],xmm3[6],xmm4[7] +; AVX1-NEXT: vpackusdw %xmm1, %xmm2, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: vmovups %ymm0, (%rax) +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; ; AVX2-LABEL: avg_v16i16_2: ; AVX2: # BB#0: ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 @@ -1586,21 +1906,13 @@ define void @avg_v16i16_2(<16 x i16>* %a, <16 x i16>* %b) { ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; -; AVX512F-LABEL: avg_v16i16_2: -; AVX512F: # BB#0: -; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 -; AVX512F-NEXT: vpavgw (%rsi), %ymm0, %ymm0 -; AVX512F-NEXT: vmovdqu %ymm0, (%rax) -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512BW-LABEL: avg_v16i16_2: -; AVX512BW: # BB#0: -; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0 -; AVX512BW-NEXT: vpavgw (%rsi), %ymm0, %ymm0 -; AVX512BW-NEXT: vmovdqu %ymm0, (%rax) -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq +; AVX512-LABEL: avg_v16i16_2: +; AVX512: # BB#0: +; AVX512-NEXT: vmovdqa (%rdi), %ymm0 +; AVX512-NEXT: vpavgw (%rsi), %ymm0, %ymm0 +; AVX512-NEXT: vmovdqu %ymm0, (%rax) +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq %1 = load <16 x i16>, <16 x i16>* %a %2 = load <16 x i16>, <16 x i16>* %b %3 = zext <16 x i16> %1 to <16 x i32> @@ -1701,6 +2013,69 @@ define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) { ; SSE2-NEXT: movdqu %xmm8, (%rax) ; SSE2-NEXT: retq ; +; AVX1-LABEL: avg_v32i16_2: +; AVX1: # BB#0: +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm8 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [1,1,1,1] +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm0, %xmm0 +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm1, %xmm1 +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm2, %xmm2 +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm3, %xmm3 +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm4, %xmm9 +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; 
AVX1-NEXT: vpaddd %xmm7, %xmm5, %xmm5 +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm8, %xmm7 +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpaddd %xmm6, %xmm4, %xmm4 +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX1-NEXT: vpaddd %xmm4, %xmm6, %xmm4 +; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1 +; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0 +; AVX1-NEXT: vpxor %xmm6, %xmm6, %xmm6 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm6[1],xmm0[2],xmm6[3],xmm0[4],xmm6[5],xmm0[6],xmm6[7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2],xmm6[3],xmm1[4],xmm6[5],xmm1[6],xmm6[7] +; AVX1-NEXT: vpackusdw %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: vpsrld $1, %xmm3, %xmm1 +; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm6[1],xmm2[2],xmm6[3],xmm2[4],xmm6[5],xmm2[6],xmm6[7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2],xmm6[3],xmm1[4],xmm6[5],xmm1[6],xmm6[7] +; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: vpsrld $1, %xmm5, %xmm1 +; AVX1-NEXT: vpsrld $1, %xmm9, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm6[1],xmm2[2],xmm6[3],xmm2[4],xmm6[5],xmm2[6],xmm6[7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2],xmm6[3],xmm1[4],xmm6[5],xmm1[6],xmm6[7] +; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vpsrld $1, %xmm4, %xmm2 +; AVX1-NEXT: vpsrld $1, %xmm7, %xmm3 +; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm6[1],xmm3[2],xmm6[3],xmm3[4],xmm6[5],xmm3[6],xmm6[7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm6[1],xmm2[2],xmm6[3],xmm2[4],xmm6[5],xmm2[6],xmm6[7] +; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 +; AVX1-NEXT: vmovups %ymm1, (%rax) +; AVX1-NEXT: vmovups %ymm0, (%rax) +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; ; AVX2-LABEL: avg_v32i16_2: ; AVX2: # BB#0: ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero @@ -1785,26 +2160,12 @@ define void @avg_v4i8_const(<4 x i8>* %a) { ; SSE2-NEXT: movd %xmm0, (%rax) ; SSE2-NEXT: retq ; -; AVX2-LABEL: avg_v4i8_const: -; AVX2: # BB#0: -; AVX2-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero -; AVX2-NEXT: vpavgb {{.*}}(%rip), %xmm0, %xmm0 -; AVX2-NEXT: vmovd %xmm0, (%rax) -; AVX2-NEXT: retq -; -; AVX512F-LABEL: avg_v4i8_const: -; AVX512F: # BB#0: -; AVX512F-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero -; AVX512F-NEXT: vpavgb {{.*}}(%rip), %xmm0, %xmm0 -; AVX512F-NEXT: vmovd %xmm0, (%rax) -; AVX512F-NEXT: retq -; -; AVX512BW-LABEL: avg_v4i8_const: -; AVX512BW: # BB#0: -; AVX512BW-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero -; AVX512BW-NEXT: vpavgb {{.*}}(%rip), %xmm0, %xmm0 -; AVX512BW-NEXT: vmovd %xmm0, (%rax) -; AVX512BW-NEXT: retq +; AVX-LABEL: avg_v4i8_const: +; AVX: # BB#0: +; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX-NEXT: vpavgb {{.*}}(%rip), %xmm0, %xmm0 +; AVX-NEXT: vmovd %xmm0, (%rax) +; AVX-NEXT: retq %1 = load <4 x i8>, <4 x i8>* %a %2 = zext <4 x i8> %1 to <4 x i32> %3 = add nuw nsw <4 x i32> %2, <i32 1, i32 2, i32 3, i32 4> @@ -1822,26 +2183,12 @@ define void @avg_v8i8_const(<8 x i8>* %a) { ; SSE2-NEXT: movq %xmm0, (%rax) ; SSE2-NEXT: retq ; -; AVX2-LABEL: avg_v8i8_const: -; AVX2: # BB#0: -; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero -; 
AVX2-NEXT: vpavgb {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vmovq %xmm0, (%rax)
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: avg_v8i8_const:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX512F-NEXT: vpavgb {{.*}}(%rip), %xmm0, %xmm0
-; AVX512F-NEXT: vmovq %xmm0, (%rax)
-; AVX512F-NEXT: retq
-;
-; AVX512BW-LABEL: avg_v8i8_const:
-; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX512BW-NEXT: vpavgb {{.*}}(%rip), %xmm0, %xmm0
-; AVX512BW-NEXT: vmovq %xmm0, (%rax)
-; AVX512BW-NEXT: retq
+; AVX-LABEL: avg_v8i8_const:
+; AVX: # BB#0:
+; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vpavgb {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vmovq %xmm0, (%rax)
+; AVX-NEXT: retq
%1 = load <8 x i8>, <8 x i8>* %a
%2 = zext <8 x i8> %1 to <8 x i32>
%3 = add nuw nsw <8 x i32> %2, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
@@ -1859,26 +2206,12 @@ define void @avg_v16i8_const(<16 x i8>* %a) {
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
;
-; AVX2-LABEL: avg_v16i8_const:
-; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqa (%rdi), %xmm0
-; AVX2-NEXT: vpavgb {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vmovdqu %xmm0, (%rax)
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: avg_v16i8_const:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
-; AVX512F-NEXT: vpavgb {{.*}}(%rip), %xmm0, %xmm0
-; AVX512F-NEXT: vmovdqu %xmm0, (%rax)
-; AVX512F-NEXT: retq
-;
-; AVX512BW-LABEL: avg_v16i8_const:
-; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
-; AVX512BW-NEXT: vpavgb {{.*}}(%rip), %xmm0, %xmm0
-; AVX512BW-NEXT: vmovdqu %xmm0, (%rax)
-; AVX512BW-NEXT: retq
+; AVX-LABEL: avg_v16i8_const:
+; AVX: # BB#0:
+; AVX-NEXT: vmovdqa (%rdi), %xmm0
+; AVX-NEXT: vpavgb {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vmovdqu %xmm0, (%rax)
+; AVX-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a
%2 = zext <16 x i8> %1 to <16 x i32>
%3 = add nuw nsw <16 x i32> %2, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
@@ -1949,6 +2282,54 @@ define void @avg_v32i8_const(<32 x i8>* %a) {
; SSE2-NEXT: movdqu %xmm4, (%rax)
; SSE2-NEXT: retq
;
+; AVX1-LABEL: avg_v32i8_const:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = [5,6,7,8]
+; AVX1-NEXT: vpaddd %xmm0, %xmm7, %xmm9
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [1,2,3,4]
+; AVX1-NEXT: vpaddd %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vpaddd %xmm0, %xmm5, %xmm5
+; AVX1-NEXT: vpaddd %xmm7, %xmm4, %xmm4
+; AVX1-NEXT: vpaddd %xmm0, %xmm3, %xmm3
+; AVX1-NEXT: vpaddd %xmm7, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpaddd %xmm7, %xmm8, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm8
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm4, %xmm4
+; AVX1-NEXT: vpsrld $1, %xmm5, %xmm5
+; AVX1-NEXT: vpsrld $1, %xmm6, %xmm6
+; AVX1-NEXT: vpsrld $1, %xmm9, %xmm7
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; AVX1-NEXT: vpand %xmm1, %xmm7, %xmm7
+; AVX1-NEXT: vpand %xmm1, %xmm6, %xmm6
+; AVX1-NEXT: vpackuswb %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vpand %xmm1, %xmm5, %xmm5
+; AVX1-NEXT: vpand %xmm1, %xmm4, %xmm4
+; AVX1-NEXT: vpackuswb %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpackuswb %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm1, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm1, %xmm2, %xmm2
+; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm1, %xmm8, %xmm1
+; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: vmovups %ymm0, (%rax)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
; AVX2-LABEL: avg_v32i8_const:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
@@ -1957,21 +2338,13 @@ define void @avg_v32i8_const(<32 x i8>* %a) {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512F-LABEL: avg_v32i8_const:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512F-NEXT: vpavgb {{.*}}(%rip), %ymm0, %ymm0
-; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: retq
-;
-; AVX512BW-LABEL: avg_v32i8_const:
-; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512BW-NEXT: vpavgb {{.*}}(%rip), %ymm0, %ymm0
-; AVX512BW-NEXT: vmovdqu %ymm0, (%rax)
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
+; AVX512-LABEL: avg_v32i8_const:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovdqa (%rdi), %ymm0
+; AVX512-NEXT: vpavgb {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: vmovdqu %ymm0, (%rax)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
%1 = load <32 x i8>, <32 x i8>* %a
%2 = zext <32 x i8> %1 to <32 x i32>
%3 = add nuw nsw <32 x i32> %2, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
@@ -2101,6 +2474,99 @@ define void @avg_v64i8_const(<64 x i8>* %a) {
; SSE2-NEXT: movdqu %xmm1, (%rax)
; SSE2-NEXT: retq
;
+; AVX1-LABEL: avg_v64i8_const:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm9 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm14 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm11 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm13 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm10 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm12 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = [5,6,7,8]
+; AVX1-NEXT: vpaddd %xmm0, %xmm5, %xmm5
+; AVX1-NEXT: vpaddd %xmm0, %xmm7, %xmm15
+; AVX1-NEXT: vpaddd %xmm0, %xmm13, %xmm13
+; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm7
+; AVX1-NEXT: vpaddd %xmm0, %xmm11, %xmm11
+; AVX1-NEXT: vpaddd %xmm0, %xmm3, %xmm1
+; AVX1-NEXT: vmovdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vpaddd %xmm0, %xmm9, %xmm9
+; AVX1-NEXT: vpaddd %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1,2,3,4]
+; AVX1-NEXT: vpaddd %xmm2, %xmm12, %xmm0
+; AVX1-NEXT: vpaddd %xmm2, %xmm10, %xmm10
+; AVX1-NEXT: vpaddd %xmm2, %xmm8, %xmm8
+; AVX1-NEXT: vpaddd %xmm2, %xmm4, %xmm4
+; AVX1-NEXT: vpaddd %xmm2, %xmm6, %xmm1
+; AVX1-NEXT: vpaddd %xmm2, %xmm14, %xmm6
+; AVX1-NEXT: vpaddd -{{[0-9]+}}(%rsp), %xmm2, %xmm12 # 16-byte Folded Reload
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpaddd %xmm2, %xmm3, %xmm14
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm5, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm10, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm15, %xmm2
+; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpackuswb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm8, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm13, %xmm3
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm4, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm7, %xmm4
+; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpackuswb %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm11, %xmm2
+; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm6, %xmm2
+; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
+; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm12, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm9, %xmm3
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm14, %xmm3
+; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm4 # 16-byte Reload
+; AVX1-NEXT: vpsrld $1, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpackuswb %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vmovups %ymm1, (%rax)
+; AVX1-NEXT: vmovups %ymm0, (%rax)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
; AVX2-LABEL: avg_v64i8_const:
; AVX2: # BB#0:
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
@@ -2215,26 +2681,12 @@ define void @avg_v4i16_const(<4 x i16>* %a) {
; SSE2-NEXT: movq %xmm0, (%rax)
; SSE2-NEXT: retq
;
-; AVX2-LABEL: avg_v4i16_const:
-; AVX2: # BB#0:
-; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX2-NEXT: vpavgw {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vmovq %xmm0, (%rax)
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: avg_v4i16_const:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX512F-NEXT: vpavgw {{.*}}(%rip), %xmm0, %xmm0
-; AVX512F-NEXT: vmovq %xmm0, (%rax)
-; AVX512F-NEXT: retq
-;
-; AVX512BW-LABEL: avg_v4i16_const:
-; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX512BW-NEXT: vpavgw {{.*}}(%rip), %xmm0, %xmm0
-; AVX512BW-NEXT: vmovq %xmm0, (%rax)
-; AVX512BW-NEXT: retq
+; AVX-LABEL: avg_v4i16_const:
+; AVX: # BB#0:
+; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vpavgw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vmovq %xmm0, (%rax)
+; AVX-NEXT: retq
%1 = load <4 x i16>, <4 x i16>* %a
%2 = zext <4 x i16> %1 to <4 x i32>
%3 = add nuw nsw <4 x i32> %2, <i32 1, i32 2, i32 3, i32 4>
@@ -2252,26 +2704,12 @@ define void @avg_v8i16_const(<8 x i16>* %a) {
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
;
-; AVX2-LABEL: avg_v8i16_const:
-; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqa (%rdi), %xmm0
-; AVX2-NEXT: vpavgw {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vmovdqu %xmm0, (%rax)
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: avg_v8i16_const:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
-; AVX512F-NEXT: vpavgw {{.*}}(%rip), %xmm0, %xmm0
-; AVX512F-NEXT: vmovdqu %xmm0, (%rax)
-; AVX512F-NEXT: retq
-;
-; AVX512BW-LABEL: avg_v8i16_const:
-; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
-; AVX512BW-NEXT: vpavgw {{.*}}(%rip), %xmm0, %xmm0
-; AVX512BW-NEXT: vmovdqu %xmm0, (%rax)
-; AVX512BW-NEXT: retq
+; AVX-LABEL: avg_v8i16_const:
+; AVX: # BB#0:
+; AVX-NEXT: vmovdqa (%rdi), %xmm0
+; AVX-NEXT: vpavgw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vmovdqu %xmm0, (%rax)
+; AVX-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a
%2 = zext <8 x i16> %1 to <8 x i32>
%3 = add nuw nsw <8 x i32> %2, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
@@ -2317,6 +2755,34 @@ define void @avg_v16i16_const(<16 x i16>* %a) {
; SSE2-NEXT: movdqu %xmm2, (%rax)
; SSE2-NEXT: retq
;
+; AVX1-LABEL: avg_v16i16_const:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [5,6,7,8]
+; AVX1-NEXT: vpaddd %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1,2,3,4]
+; AVX1-NEXT: vpaddd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2],xmm4[3],xmm3[4],xmm4[5],xmm3[6],xmm4[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2],xmm4[3],xmm2[4],xmm4[5],xmm2[6],xmm4[7]
+; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7]
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vmovups %ymm0, (%rax)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
; AVX2-LABEL: avg_v16i16_const:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
@@ -2325,21 +2791,13 @@ define void @avg_v16i16_const(<16 x i16>* %a) {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512F-LABEL: avg_v16i16_const:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512F-NEXT: vpavgw {{.*}}(%rip), %ymm0, %ymm0
-; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: retq
-;
-; AVX512BW-LABEL: avg_v16i16_const:
-; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512BW-NEXT: vpavgw {{.*}}(%rip), %ymm0, %ymm0
-; AVX512BW-NEXT: vmovdqu %ymm0, (%rax)
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
+; AVX512-LABEL: avg_v16i16_const:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovdqa (%rdi), %ymm0
+; AVX512-NEXT: vpavgw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: vmovdqu %ymm0, (%rax)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
%1 = load <16 x i16>, <16 x i16>* %a
%2 = zext <16 x i16> %1 to <16 x i32>
%3 = add nuw nsw <16 x i32> %2, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
@@ -2413,6 +2871,54 @@ define void @avg_v32i16_const(<32 x i16>* %a) {
; SSE2-NEXT: movdqu %xmm5, (%rax)
; SSE2-NEXT: retq
;
+; AVX1-LABEL: avg_v32i16_const:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm8 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = [5,6,7,8]
+; AVX1-NEXT: vpaddd %xmm0, %xmm7, %xmm9
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [1,2,3,4]
+; AVX1-NEXT: vpaddd %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vpaddd %xmm0, %xmm5, %xmm5
+; AVX1-NEXT: vpaddd %xmm7, %xmm4, %xmm4
+; AVX1-NEXT: vpaddd %xmm0, %xmm3, %xmm3
+; AVX1-NEXT: vpaddd %xmm7, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpaddd %xmm7, %xmm8, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm8
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm4, %xmm4
+; AVX1-NEXT: vpsrld $1, %xmm5, %xmm5
+; AVX1-NEXT: vpsrld $1, %xmm6, %xmm6
+; AVX1-NEXT: vpsrld $1, %xmm9, %xmm7
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0],xmm1[1],xmm7[2],xmm1[3],xmm7[4],xmm1[5],xmm7[6],xmm1[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0],xmm1[1],xmm6[2],xmm1[3],xmm6[4],xmm1[5],xmm6[6],xmm1[7]
+; AVX1-NEXT: vpackusdw %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0],xmm1[1],xmm5[2],xmm1[3],xmm5[4],xmm1[5],xmm5[6],xmm1[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2],xmm1[3],xmm4[4],xmm1[5],xmm4[6],xmm1[7]
+; AVX1-NEXT: vpackusdw %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm1[1],xmm3[2],xmm1[3],xmm3[4],xmm1[5],xmm3[6],xmm1[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm1[1],xmm2[2],xmm1[3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
+; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm8[0],xmm1[1],xmm8[2],xmm1[3],xmm8[4],xmm1[5],xmm8[6],xmm1[7]
+; AVX1-NEXT: vpackusdw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vmovups %ymm0, (%rax)
+; AVX1-NEXT: vmovups %ymm4, (%rax)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
; AVX2-LABEL: avg_v32i16_const:
; AVX2: # BB#0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
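
The pattern these tests exercise is the rounding average avg(x, y) = (x + y + 1) >> 1, computed in a widened integer type, which X86 lowering recognizes and collapses to a single PAVGB/PAVGW when the vector type fits the subtarget; the long AVX1 sequences above are the fallback for 256-bit types, since AVX1 lacks 256-bit integer instructions and the <32 x i8>/<16 x i16> cases get split into 128-bit add/shift/pack chains. A minimal sketch of the idiom in LLVM IR, in the style of the non-const tests in this file (the function name and the splat-of-1 constants are illustrative, not taken from this patch):

define void @avg_round_v8i8(<8 x i8>* %a, <8 x i8>* %b) {
  ; widen so the sum cannot wrap
  %x = load <8 x i8>, <8 x i8>* %a
  %y = load <8 x i8>, <8 x i8>* %b
  %xw = zext <8 x i8> %x to <8 x i32>
  %yw = zext <8 x i8> %y to <8 x i32>
  ; (x + y + 1) >> 1, the form the backend matches to a single vpavgb
  %sum = add nuw nsw <8 x i32> %xw, %yw
  %rnd = add nuw nsw <8 x i32> %sum, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %shr = lshr <8 x i32> %rnd, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %res = trunc <8 x i32> %shr to <8 x i8>
  store <8 x i8> %res, <8 x i8>* %a
  ret void
}

The *_const functions above follow the same shape but replace %y with a vector constant, so the rounding bias should fold into the constant-pool operand of the vpavgb/vpavgw seen in the merged AVX checks.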