; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx    | FileCheck %s --check-prefix=AVX

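; PR25554: judging from the CHECK lines below (the intent is inferred from the
; checks, not restated from the original bug report), the <1, 0> constant used
; by the 'or' should be materialized with a single movq of the scalar 1, and
; the <0, 1> constant used by the 'add' should be derived from that same
; register via pslldq, rather than loading two separate constants from the
; constant pool.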
define <2 x i64> @PR25554(<2 x i64> %v0, <2 x i64> %v1) {
; SSE-LABEL: PR25554:
; SSE:       # %bb.0:
; SSE-NEXT:    movl $1, %eax
; SSE-NEXT:    movq %rax, %xmm1
; SSE-NEXT:    por %xmm1, %xmm0
; SSE-NEXT:    pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
; SSE-NEXT:    paddq %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: PR25554:
; AVX:       # %bb.0:
; AVX-NEXT:    movl $1, %eax
; AVX-NEXT:    vmovq %rax, %xmm1
; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq

  %c1 = or <2 x i64> %v0, <i64 1, i64 0>
  %c2 = add <2 x i64> %c1, <i64 0, i64 1>
  ret <2 x i64> %c2
}