author     Jim Grosbach <grosbach@apple.com>    2011-11-14 23:03:21 +0000
committer  Jim Grosbach <grosbach@apple.com>    2011-11-14 23:03:21 +0000
commit     ffc658b056b7cc0b3f6a2626694b6a4216ed728d (patch)
tree       ebe680a03750316ed3c572177cd1306a4c0fd173 /test/CodeGen
parent     88990248d3bfb2f265fcf27f8a032ac0eb14d09f (diff)
ARM VLDR/VSTR instructions don't need a size suffix.
Canonicalize on the non-suffixed form, but continue to accept assembly that has any correctly sized type suffix.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@144583 91177308-0d34-0410-b5e6-96231b3b80d8
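By way of illustration (this snippet is not part of the commit), the assembler now prints the canonical unsuffixed mnemonic but still accepts a type suffix as long as it matches the operand size, roughly:

    vldr d0, [r0]         @ canonical form now emitted
    vldr.64 d0, [r0]      @ still accepted: .64 matches the 64-bit D register
    vldr.32 s2, [r1, #4]  @ still accepted: .32 matches the 32-bit S register
    vstr d0, [r2]         @ stores are canonicalized the same way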
Diffstat (limited to 'test/CodeGen')
-rw-r--r--  test/CodeGen/ARM/2009-09-09-fpcmp-ole.ll         |  2
-rw-r--r--  test/CodeGen/ARM/2009-09-24-spill-align.ll       |  2
-rw-r--r--  test/CodeGen/ARM/2010-05-21-BuildVector.ll       |  8
-rw-r--r--  test/CodeGen/ARM/2011-10-26-memset-with-neon.ll  |  2
-rw-r--r--  test/CodeGen/ARM/fast-isel-cmp-imm.ll            |  8
-rw-r--r--  test/CodeGen/ARM/fp.ll                           |  2
-rw-r--r--  test/CodeGen/ARM/fpcmp-opt.ll                    |  4
-rw-r--r--  test/CodeGen/ARM/fpmem.ll                        |  8
-rw-r--r--  test/CodeGen/ARM/lsr-on-unrolled-loops.ll        |  6
-rw-r--r--  test/CodeGen/ARM/neon_ld1.ll                     |  4
-rw-r--r--  test/CodeGen/ARM/reg_sequence.ll                 |  4
-rw-r--r--  test/CodeGen/ARM/subreg-remat.ll                 | 16
-rw-r--r--  test/CodeGen/ARM/vbsl-constant.ll                | 18
-rw-r--r--  test/CodeGen/ARM/vdup.ll                         |  2
-rw-r--r--  test/CodeGen/ARM/vector-DAGCombine.ll            |  6
-rw-r--r--  test/CodeGen/ARM/vext.ll                         |  2
-rw-r--r--  test/CodeGen/ARM/widen-vmovs.ll                  |  2
-rw-r--r--  test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll    |  2
-rw-r--r--  test/CodeGen/Thumb2/2010-06-14-NEONCoalescer.ll  |  2
19 files changed, 50 insertions, 50 deletions
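For context (a hypothetical test, not one of the files below): FileCheck matches its patterns as literal substrings of the compiler output, so the old "vldr.64" / "vstr.32" patterns stop matching once llc prints the unsuffixed form, while a plain "vldr" or "vstr" pattern matches both the old and new output. The test updates below all follow that pattern:

; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s
; CHECK: vldr
define double @load_double(double* %p) nounwind {
  %v = load double* %p
  ret double %v
}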
diff --git a/test/CodeGen/ARM/2009-09-09-fpcmp-ole.ll b/test/CodeGen/ARM/2009-09-09-fpcmp-ole.ll
index 0a157c96b31..426bd17590b 100644
--- a/test/CodeGen/ARM/2009-09-09-fpcmp-ole.ll
+++ b/test/CodeGen/ARM/2009-09-09-fpcmp-ole.ll
@@ -9,7 +9,7 @@ define void @test(double* %x, double* %y) nounwind {
br i1 %4, label %bb1, label %bb2
bb1:
-;CHECK: vstrhi.64
+;CHECK: vstrhi
store double %1, double* %y
br label %bb2
diff --git a/test/CodeGen/ARM/2009-09-24-spill-align.ll b/test/CodeGen/ARM/2009-09-24-spill-align.ll
index 8bfd02697b7..eb9c2d0f7f8 100644
--- a/test/CodeGen/ARM/2009-09-24-spill-align.ll
+++ b/test/CodeGen/ARM/2009-09-24-spill-align.ll
@@ -6,7 +6,7 @@ entry:
%arg0_poly16x4_t = alloca <4 x i16> ; <<4 x i16>*> [#uses=1]
%out_poly16_t = alloca i16 ; <i16*> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
-; CHECK: vldr.64
+; CHECK: vldr
%0 = load <4 x i16>* %arg0_poly16x4_t, align 8 ; <<4 x i16>> [#uses=1]
%1 = extractelement <4 x i16> %0, i32 1 ; <i16> [#uses=1]
store i16 %1, i16* %out_poly16_t, align 2
diff --git a/test/CodeGen/ARM/2010-05-21-BuildVector.ll b/test/CodeGen/ARM/2010-05-21-BuildVector.ll
index cd1c9c8c042..a400b7b288c 100644
--- a/test/CodeGen/ARM/2010-05-21-BuildVector.ll
+++ b/test/CodeGen/ARM/2010-05-21-BuildVector.ll
@@ -10,28 +10,28 @@ entry:
%4 = ashr i32 %3, 30
%.sum = add i32 %4, 4
%5 = getelementptr inbounds float* %table, i32 %.sum
-;CHECK: vldr.32 s
+;CHECK: vldr s
%6 = load float* %5, align 4
%tmp11 = insertelement <4 x float> undef, float %6, i32 0
%7 = shl i32 %packedValue, 18
%8 = ashr i32 %7, 30
%.sum12 = add i32 %8, 4
%9 = getelementptr inbounds float* %table, i32 %.sum12
-;CHECK: vldr.32 s
+;CHECK: vldr s
%10 = load float* %9, align 4
%tmp9 = insertelement <4 x float> %tmp11, float %10, i32 1
%11 = shl i32 %packedValue, 20
%12 = ashr i32 %11, 30
%.sum13 = add i32 %12, 4
%13 = getelementptr inbounds float* %table, i32 %.sum13
-;CHECK: vldr.32 s
+;CHECK: vldr s
%14 = load float* %13, align 4
%tmp7 = insertelement <4 x float> %tmp9, float %14, i32 2
%15 = shl i32 %packedValue, 22
%16 = ashr i32 %15, 30
%.sum14 = add i32 %16, 4
%17 = getelementptr inbounds float* %table, i32 %.sum14
-;CHECK: vldr.32 s
+;CHECK: vldr s
%18 = load float* %17, align 4
%tmp5 = insertelement <4 x float> %tmp7, float %18, i32 3
%19 = fmul <4 x float> %tmp5, %2
diff --git a/test/CodeGen/ARM/2011-10-26-memset-with-neon.ll b/test/CodeGen/ARM/2011-10-26-memset-with-neon.ll
index 3c9216cde7b..42b14914814 100644
--- a/test/CodeGen/ARM/2011-10-26-memset-with-neon.ll
+++ b/test/CodeGen/ARM/2011-10-26-memset-with-neon.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=arm -mcpu=cortex-a8 < %s | FileCheck %s
; Should trigger a NEON store.
-; CHECK: vstr.64
+; CHECK: vstr
define void @f_0_12(i8* nocapture %c) nounwind optsize {
entry:
call void @llvm.memset.p0i8.i64(i8* %c, i8 0, i64 12, i32 8, i1 false)
diff --git a/test/CodeGen/ARM/fast-isel-cmp-imm.ll b/test/CodeGen/ARM/fast-isel-cmp-imm.ll
index b1bf63f8166..33c60081a3e 100644
--- a/test/CodeGen/ARM/fast-isel-cmp-imm.ll
+++ b/test/CodeGen/ARM/fast-isel-cmp-imm.ll
@@ -26,9 +26,9 @@ entry:
; ARM: t1b
; THUMB: t1b
%cmp = fcmp oeq float %a, -0.000000e+00
-; ARM: vldr.32
+; ARM: vldr
; ARM: vcmpe.f32 s{{[0-9]+}}, s{{[0-9]+}}
-; THUMB: vldr.32
+; THUMB: vldr
; THUMB: vcmpe.f32 s{{[0-9]+}}, s{{[0-9]+}}
br i1 %cmp, label %if.then, label %if.end
@@ -63,9 +63,9 @@ entry:
; ARM: t2b
; THUMB: t2b
%cmp = fcmp oeq double %a, -0.000000e+00
-; ARM: vldr.64
+; ARM: vldr
; ARM: vcmpe.f64 d{{[0-9]+}}, d{{[0-9]+}}
-; THUMB: vldr.64
+; THUMB: vldr
; THUMB: vcmpe.f64 d{{[0-9]+}}, d{{[0-9]+}}
br i1 %cmp, label %if.then, label %if.end
diff --git a/test/CodeGen/ARM/fp.ll b/test/CodeGen/ARM/fp.ll
index ac023d19ec3..93601cf9d6c 100644
--- a/test/CodeGen/ARM/fp.ll
+++ b/test/CodeGen/ARM/fp.ll
@@ -42,7 +42,7 @@ entry:
define double @h(double* %v) {
;CHECK: h:
-;CHECK: vldr.64
+;CHECK: vldr
;CHECK-NEXT: vmov
entry:
%tmp = load double* %v ; <double> [#uses=1]
diff --git a/test/CodeGen/ARM/fpcmp-opt.ll b/test/CodeGen/ARM/fpcmp-opt.ll
index 7c0dd0e12a7..ad03202f8eb 100644
--- a/test/CodeGen/ARM/fpcmp-opt.ll
+++ b/test/CodeGen/ARM/fpcmp-opt.ll
@@ -14,8 +14,8 @@ entry:
; FINITE: beq
; NAN: t1:
-; NAN: vldr.32 s0,
-; NAN: vldr.32 s1,
+; NAN: vldr s0,
+; NAN: vldr s1,
; NAN: vcmpe.f32 s1, s0
; NAN: vmrs apsr_nzcv, fpscr
; NAN: beq
diff --git a/test/CodeGen/ARM/fpmem.ll b/test/CodeGen/ARM/fpmem.ll
index 38339334b44..8faa57896a8 100644
--- a/test/CodeGen/ARM/fpmem.ll
+++ b/test/CodeGen/ARM/fpmem.ll
@@ -8,7 +8,7 @@ define float @f1(float %a) {
define float @f2(float* %v, float %u) {
; CHECK: f2:
-; CHECK: vldr.32{{.*}}[
+; CHECK: vldr{{.*}}[
%tmp = load float* %v ; <float> [#uses=1]
%tmp1 = fadd float %tmp, %u ; <float> [#uses=1]
ret float %tmp1
@@ -16,7 +16,7 @@ define float @f2(float* %v, float %u) {
define float @f2offset(float* %v, float %u) {
; CHECK: f2offset:
-; CHECK: vldr.32{{.*}}, #4]
+; CHECK: vldr{{.*}}, #4]
%addr = getelementptr float* %v, i32 1
%tmp = load float* %addr
%tmp1 = fadd float %tmp, %u
@@ -25,7 +25,7 @@ define float @f2offset(float* %v, float %u) {
define float @f2noffset(float* %v, float %u) {
; CHECK: f2noffset:
-; CHECK: vldr.32{{.*}}, #-4]
+; CHECK: vldr{{.*}}, #-4]
%addr = getelementptr float* %v, i32 -1
%tmp = load float* %addr
%tmp1 = fadd float %tmp, %u
@@ -34,7 +34,7 @@ define float @f2noffset(float* %v, float %u) {
define void @f3(float %a, float %b, float* %v) {
; CHECK: f3:
-; CHECK: vstr.32{{.*}}[
+; CHECK: vstr{{.*}}[
%tmp = fadd float %a, %b ; <float> [#uses=1]
store float %tmp, float* %v
ret void
diff --git a/test/CodeGen/ARM/lsr-on-unrolled-loops.ll b/test/CodeGen/ARM/lsr-on-unrolled-loops.ll
index 47379016cf1..ea5ae8f5187 100644
--- a/test/CodeGen/ARM/lsr-on-unrolled-loops.ll
+++ b/test/CodeGen/ARM/lsr-on-unrolled-loops.ll
@@ -4,9 +4,9 @@
; constant offset addressing, so that each of the following stores
; uses the same register.
-; CHECK: vstr.32 s{{.*}}, [{{(r[0-9]+)|(lr)}}, #32]
-; CHECK: vstr.32 s{{.*}}, [{{(r[0-9]+)|(lr)}}, #64]
-; CHECK: vstr.32 s{{.*}}, [{{(r[0-9]+)|(lr)}}, #96]
+; CHECK: vstr s{{.*}}, [{{(r[0-9]+)|(lr)}}, #32]
+; CHECK: vstr s{{.*}}, [{{(r[0-9]+)|(lr)}}, #64]
+; CHECK: vstr s{{.*}}, [{{(r[0-9]+)|(lr)}}, #96]
; We can also save a register in the outer loop, but that requires
; performing LSR on the outer loop.
diff --git a/test/CodeGen/ARM/neon_ld1.ll b/test/CodeGen/ARM/neon_ld1.ll
index c78872a4bca..c218ff523e2 100644
--- a/test/CodeGen/ARM/neon_ld1.ll
+++ b/test/CodeGen/ARM/neon_ld1.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=arm -mattr=+neon | grep vldr.64 | count 4
-; RUN: llc < %s -march=arm -mattr=+neon | grep vstr.64
+; RUN: llc < %s -march=arm -mattr=+neon | grep vldr | count 4
+; RUN: llc < %s -march=arm -mattr=+neon | grep vstr
; RUN: llc < %s -march=arm -mattr=+neon | grep vmov
define void @t1(<2 x i32>* %r, <4 x i16>* %a, <4 x i16>* %b) nounwind {
diff --git a/test/CodeGen/ARM/reg_sequence.ll b/test/CodeGen/ARM/reg_sequence.ll
index 091a003ef9c..78b4e7ea84c 100644
--- a/test/CodeGen/ARM/reg_sequence.ll
+++ b/test/CodeGen/ARM/reg_sequence.ll
@@ -155,7 +155,7 @@ define <8 x i16> @t5(i16* %A, <8 x i16>* %B) nounwind {
define <8 x i8> @t6(i8* %A, <8 x i8>* %B) nounwind {
; CHECK: t6:
-; CHECK: vldr.64
+; CHECK: vldr
; CHECK: vorr d[[D0:[0-9]+]], d[[D1:[0-9]+]]
; CHECK-NEXT: vld2.8 {d[[D1]][1], d[[D0]][1]}
%tmp1 = load <8 x i8>* %B ; <<8 x i8>> [#uses=2]
@@ -240,7 +240,7 @@ bb14: ; preds = %bb6
; PR7157
define arm_aapcs_vfpcc float @t9(%0* nocapture, %3* nocapture) nounwind {
; CHECK: t9:
-; CHECK: vldr.64
+; CHECK: vldr
; CHECK-NOT: vmov d{{.*}}, d16
; CHECK: vmov.i32 d17
; CHECK-NEXT: vstmia r0, {d16, d17}
diff --git a/test/CodeGen/ARM/subreg-remat.ll b/test/CodeGen/ARM/subreg-remat.ll
index 993d7ec7505..03ae12c6dea 100644
--- a/test/CodeGen/ARM/subreg-remat.ll
+++ b/test/CodeGen/ARM/subreg-remat.ll
@@ -12,13 +12,13 @@ target triple = "thumbv7-apple-ios"
;
; CHECK: f1
; CHECK: vmov s1, r0
-; CHECK: vldr.32 s0, LCPI
+; CHECK: vldr s0, LCPI
; The vector must be spilled:
-; CHECK: vstr.64 d0,
+; CHECK: vstr d0,
; CHECK: asm clobber d0
; And reloaded after the asm:
-; CHECK: vldr.64 [[D16:d[0-9]+]],
-; CHECK: vstr.64 [[D16]], [r1]
+; CHECK: vldr [[D16:d[0-9]+]],
+; CHECK: vstr [[D16]], [r1]
define void @f1(float %x, <2 x float>* %p) {
%v1 = insertelement <2 x float> undef, float %x, i32 1
%v2 = insertelement <2 x float> %v1, float 0x400921FB60000000, i32 0
@@ -37,13 +37,13 @@ define void @f1(float %x, <2 x float>* %p) {
; virtual register. It doesn't read the old value.
;
; CHECK: f2
-; CHECK: vldr.32 s0, LCPI
+; CHECK: vldr s0, LCPI
; The vector must not be spilled:
-; CHECK-NOT: vstr.64
+; CHECK-NOT: vstr
; CHECK: asm clobber d0
; But instead rematerialize after the asm:
-; CHECK: vldr.32 [[S0:s[0-9]+]], LCPI
-; CHECK: vstr.64 [[D0:d[0-9]+]], [r0]
+; CHECK: vldr [[S0:s[0-9]+]], LCPI
+; CHECK: vstr [[D0:d[0-9]+]], [r0]
define void @f2(<2 x float>* %p) {
%v2 = insertelement <2 x float> undef, float 0x400921FB60000000, i32 0
%y = call double asm sideeffect "asm clobber $0", "=w,0,~{d1},~{d2},~{d3},~{d4},~{d5},~{d6},~{d7},~{d8},~{d9},~{d10},~{d11},~{d12},~{d13},~{d14},~{d15},~{d16},~{d17},~{d18},~{d19},~{d20},~{d21},~{d22},~{d23},~{d24},~{d25},~{d26},~{d27},~{d28},~{d29},~{d30},~{d31}"(<2 x float> %v2) nounwind
diff --git a/test/CodeGen/ARM/vbsl-constant.ll b/test/CodeGen/ARM/vbsl-constant.ll
index 14e668efb1d..f157dbdb970 100644
--- a/test/CodeGen/ARM/vbsl-constant.ll
+++ b/test/CodeGen/ARM/vbsl-constant.ll
@@ -2,8 +2,8 @@
define <8 x i8> @v_bsli8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
;CHECK: v_bsli8:
-;CHECK: vldr.64
-;CHECK: vldr.64
+;CHECK: vldr
+;CHECK: vldr
;CHECK: vbsl
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -16,8 +16,8 @@ define <8 x i8> @v_bsli8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
define <4 x i16> @v_bsli16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
;CHECK: v_bsli16:
-;CHECK: vldr.64
-;CHECK: vldr.64
+;CHECK: vldr
+;CHECK: vldr
;CHECK: vbsl
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -30,8 +30,8 @@ define <4 x i16> @v_bsli16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind
define <2 x i32> @v_bsli32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
;CHECK: v_bsli32:
-;CHECK: vldr.64
-;CHECK: vldr.64
+;CHECK: vldr
+;CHECK: vldr
;CHECK: vbsl
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -44,9 +44,9 @@ define <2 x i32> @v_bsli32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind
define <1 x i64> @v_bsli64(<1 x i64>* %A, <1 x i64>* %B, <1 x i64>* %C) nounwind {
;CHECK: v_bsli64:
-;CHECK: vldr.64
-;CHECK: vldr.64
-;CHECK: vldr.64
+;CHECK: vldr
+;CHECK: vldr
+;CHECK: vldr
;CHECK: vbsl
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
diff --git a/test/CodeGen/ARM/vdup.ll b/test/CodeGen/ARM/vdup.ll
index e99fac1f1e6..05332e4d8c5 100644
--- a/test/CodeGen/ARM/vdup.ll
+++ b/test/CodeGen/ARM/vdup.ll
@@ -254,7 +254,7 @@ entry:
;CHECK: redundantVdup:
;CHECK: vmov.i8
;CHECK-NOT: vdup.8
-;CHECK: vstr.64
+;CHECK: vstr
define void @redundantVdup(<8 x i8>* %ptr) nounwind {
%1 = insertelement <8 x i8> undef, i8 -128, i32 0
%2 = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> zeroinitializer
diff --git a/test/CodeGen/ARM/vector-DAGCombine.ll b/test/CodeGen/ARM/vector-DAGCombine.ll
index 1a97982eb0a..a38a0feae04 100644
--- a/test/CodeGen/ARM/vector-DAGCombine.ll
+++ b/test/CodeGen/ARM/vector-DAGCombine.ll
@@ -80,7 +80,7 @@ declare void @llvm.arm.neon.vst1.v8i8(i8*, <8 x i8>, i32) nounwind
; so they are not split up into i32 values. Radar 8755338.
define void @i64_buildvector(i64* %ptr, <2 x i64>* %vp) nounwind {
; CHECK: i64_buildvector
-; CHECK: vldr.64
+; CHECK: vldr
%t0 = load i64* %ptr, align 4
%t1 = insertelement <2 x i64> undef, i64 %t0, i32 0
store <2 x i64> %t1, <2 x i64>* %vp
@@ -89,7 +89,7 @@ define void @i64_buildvector(i64* %ptr, <2 x i64>* %vp) nounwind {
define void @i64_insertelement(i64* %ptr, <2 x i64>* %vp) nounwind {
; CHECK: i64_insertelement
-; CHECK: vldr.64
+; CHECK: vldr
%t0 = load i64* %ptr, align 4
%vec = load <2 x i64>* %vp
%t1 = insertelement <2 x i64> %vec, i64 %t0, i32 0
@@ -99,7 +99,7 @@ define void @i64_insertelement(i64* %ptr, <2 x i64>* %vp) nounwind {
define void @i64_extractelement(i64* %ptr, <2 x i64>* %vp) nounwind {
; CHECK: i64_extractelement
-; CHECK: vstr.64
+; CHECK: vstr
%vec = load <2 x i64>* %vp
%t1 = extractelement <2 x i64> %vec, i32 0
store i64 %t1, i64* %ptr
diff --git a/test/CodeGen/ARM/vext.ll b/test/CodeGen/ARM/vext.ll
index 65b5913e40a..e224bdfe25a 100644
--- a/test/CodeGen/ARM/vext.ll
+++ b/test/CodeGen/ARM/vext.ll
@@ -138,7 +138,7 @@ define <8 x i16> @test_illegal(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; Make sure this doesn't crash
define arm_aapcscc void @test_elem_mismatch(<2 x i64>* nocapture %src, <4 x i16>* nocapture %dest) nounwind {
; CHECK: test_elem_mismatch:
-; CHECK: vstr.64
+; CHECK: vstr
%tmp0 = load <2 x i64>* %src, align 16
%tmp1 = bitcast <2 x i64> %tmp0 to <4 x i32>
%tmp2 = extractelement <4 x i32> %tmp1, i32 0
diff --git a/test/CodeGen/ARM/widen-vmovs.ll b/test/CodeGen/ARM/widen-vmovs.ll
index 8fd99ba7af4..1f5113eeb33 100644
--- a/test/CodeGen/ARM/widen-vmovs.ll
+++ b/test/CodeGen/ARM/widen-vmovs.ll
@@ -3,7 +3,7 @@ target triple = "thumbv7-apple-ios"
; The 0.0 constant is loaded from the constant pool and kept in a register.
; CHECK: %entry
-; CHECK: vldr.32 s
+; CHECK: vldr s
; The float loop variable is initialized with a vmovs from the constant register.
; The vmovs is first widened to a vmovd, and then converted to a vorr because of the v2f32 vadd.f32.
; CHECK: vorr [[DL:d[0-9]+]], [[DN:d[0-9]+]]
diff --git a/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll b/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll
index 034a28f003d..524e5a6b7b6 100644
--- a/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll
+++ b/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll
@@ -5,7 +5,7 @@ define void @fred(i32 %three_by_three, i8* %in, double %dt1, i32 %x_size, i32 %y
entry:
; -- The loop following the load should only use a single add-literation
; instruction.
-; CHECK: ldr.64
+; CHECK: vldr
; CHECK: adds r{{[0-9]+.*}}#1
; CHECK-NOT: adds
; CHECK: subsections_via_symbols
diff --git a/test/CodeGen/Thumb2/2010-06-14-NEONCoalescer.ll b/test/CodeGen/Thumb2/2010-06-14-NEONCoalescer.ll
index 01fb0a581a5..06762bad854 100644
--- a/test/CodeGen/Thumb2/2010-06-14-NEONCoalescer.ll
+++ b/test/CodeGen/Thumb2/2010-06-14-NEONCoalescer.ll
@@ -23,7 +23,7 @@ entry:
%4 = insertelement <2 x double> %2, double %V.0.ph, i32 1 ; <<2 x double>> [#uses=2]
; Constant pool load followed by add.
; Then clobber the loaded register, not the sum.
-; CHECK: vldr.64 [[LDR:d.*]],
+; CHECK: vldr [[LDR:d.*]],
; CHECK: LPC0_0:
; CHECK: vadd.f64 [[ADD:d.*]], [[LDR]], [[LDR]]
; CHECK-NOT: vmov.f64 [[ADD]]