; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mattr=+avx2 | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 | FileCheck %s --check-prefix=X64

; 256-bit nontemporal vector stores with AVX2 should select the
; vmovntps/vmovntpd/vmovntdq instructions. The specific constants added to
; each vector before its store are arbitrary placeholders: the CHECK lines
; only require that each add takes some operand from the constant pool.

define i32 @f(<8 x float> %A, i8* %B, <4 x double> %C, <4 x i64> %E, <8 x i32> %F, <16 x i16> %G, <32 x i8> %H, i32* %loadptr) nounwind {
; X32-LABEL: f:
; X32:       # %bb.0:
; X32-NEXT:    pushl %ebp
; X32-NEXT:    movl %esp, %ebp
; X32-NEXT:    andl $-32, %esp
; X32-NEXT:    subl $32, %esp
; X32-NEXT:    vmovdqa 104(%ebp), %ymm3
; X32-NEXT:    vmovdqa 72(%ebp), %ymm4
; X32-NEXT:    vmovdqa 40(%ebp), %ymm5
; X32-NEXT:    movl 8(%ebp), %ecx
; X32-NEXT:    movl 136(%ebp), %edx
; X32-NEXT:    movl (%edx), %eax
; X32-NEXT:    vaddps {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT:    vmovntps %ymm0, (%ecx)
; X32-NEXT:    vpaddq {{\.LCPI.*}}, %ymm2, %ymm0
; X32-NEXT:    addl (%edx), %eax
; X32-NEXT:    vmovntdq %ymm0, (%ecx)
; X32-NEXT:    vaddpd {{\.LCPI.*}}, %ymm1, %ymm0
; X32-NEXT:    addl (%edx), %eax
; X32-NEXT:    vmovntpd %ymm0, (%ecx)
; X32-NEXT:    vpaddd {{\.LCPI.*}}, %ymm5, %ymm0
; X32-NEXT:    addl (%edx), %eax
; X32-NEXT:    vmovntdq %ymm0, (%ecx)
; X32-NEXT:    vpaddw {{\.LCPI.*}}, %ymm4, %ymm0
; X32-NEXT:    addl (%edx), %eax
; X32-NEXT:    vmovntdq %ymm0, (%ecx)
; X32-NEXT:    vpaddb {{\.LCPI.*}}, %ymm3, %ymm0
; X32-NEXT:    addl (%edx), %eax
; X32-NEXT:    vmovntdq %ymm0, (%ecx)
; X32-NEXT:    movl %ebp, %esp
; X32-NEXT:    popl %ebp
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: f:
; X64:       # %bb.0:
; X64-NEXT:    movl (%rsi), %eax
; X64-NEXT:    vaddps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT:    vmovntps %ymm0, (%rdi)
; X64-NEXT:    vpaddq {{.*}}(%rip), %ymm2, %ymm0
; X64-NEXT:    addl (%rsi), %eax
; X64-NEXT:    vmovntdq %ymm0, (%rdi)
; X64-NEXT:    vaddpd {{.*}}(%rip), %ymm1, %ymm0
; X64-NEXT:    addl (%rsi), %eax
; X64-NEXT:    vmovntpd %ymm0, (%rdi)
; X64-NEXT:    vpaddd {{.*}}(%rip), %ymm3, %ymm0
; X64-NEXT:    addl (%rsi), %eax
; X64-NEXT:    vmovntdq %ymm0, (%rdi)
; X64-NEXT:    vpaddw {{.*}}(%rip), %ymm4, %ymm0
; X64-NEXT:    addl (%rsi), %eax
; X64-NEXT:    vmovntdq %ymm0, (%rdi)
; X64-NEXT:    vpaddb {{.*}}(%rip), %ymm5, %ymm0
; X64-NEXT:    addl (%rsi), %eax
; X64-NEXT:    vmovntdq %ymm0, (%rdi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %v0 = load i32, i32* %loadptr, align 1
  %cast = bitcast i8* %B to <8 x float>*
  %A2 = fadd <8 x float> %A, <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>
  store <8 x float> %A2, <8 x float>* %cast, align 32, !nontemporal !0
  %v1 = load i32, i32* %loadptr, align 1
  %cast1 = bitcast i8* %B to <4 x i64>*
  %E2 = add <4 x i64> %E, <i64 1, i64 2, i64 3, i64 4>
  store <4 x i64> %E2, <4 x i64>* %cast1, align 32, !nontemporal !0
  %v2 = load i32, i32* %loadptr, align 1
  %cast2 = bitcast i8* %B to <4 x double>*
  %C2 = fadd <4 x double> %C, <double 1.0, double 2.0, double 3.0, double 4.0>
  store <4 x double> %C2, <4 x double>* %cast2, align 32, !nontemporal !0
  %v3 = load i32, i32* %loadptr, align 1
  %cast3 = bitcast i8* %B to <8 x i32>*
  %F2 = add <8 x i32> %F, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
  store <8 x i32> %F2, <8 x i32>* %cast3, align 32, !nontemporal !0
  %v4 = load i32, i32* %loadptr, align 1
  %cast4 = bitcast i8* %B to <16 x i16>*
  %G2 = add <16 x i16> %G, <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16>
  store <16 x i16> %G2, <16 x i16>* %cast4, align 32, !nontemporal !0
  %v5 = load i32, i32* %loadptr, align 1
  %cast5 = bitcast i8* %B to <32 x i8>*
  %H2 = add <32 x i8> %H, <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 32>
  store <32 x i8> %H2, <32 x i8>* %cast5, align 32, !nontemporal !0
  %v6 = load i32, i32* %loadptr, align 1
  %sum1 = add i32 %v0, %v1
  %sum2 = add i32 %sum1, %v2
  %sum3 = add i32 %sum2, %v3
  %sum4 = add i32 %sum3, %v4
  %sum5 = add i32 %sum4, %v5
  %sum6 = add i32 %sum5, %v6
  ret i32 %sum5
}

!0 = !{i32 1}