author     Krzysztof Parzyszek <kparzysz@codeaurora.org>   2017-02-10 15:33:13 +0000
committer  Krzysztof Parzyszek <kparzysz@codeaurora.org>   2017-02-10 15:33:13 +0000
commit     7f4371b614a6e25f0efd2133d11d631329eba1fb (patch)
tree       af02901a3a8527fc838db034e4823dfc43983651 /test/CodeGen/Hexagon
parent     940b0c036d390ebf57122da4e616bbcd48f5f4dd (diff)
[Hexagon] Replace instruction definitions with auto-generated ones
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@294753 91177308-0d34-0410-b5e6-96231b3b80d8
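Every hunk below makes the same kind of update: with the auto-generated instruction definitions, the Hexagon assembly printer emits a bare comma between operands (no trailing space) and normalized spacing around `=` and post-increment `++`, so the FileCheck patterns in the tests are rewritten to match the new syntax. As a minimal sketch of the convention change, a hypothetical test (not part of this commit) would now check the comma-separated form:

; RUN: llc -march=hexagon < %s | FileCheck %s
; Old pattern (pre-commit): r{{[0-9]+}} = add(r{{[0-9]+}}, r{{[0-9]+}})
; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}},r{{[0-9]+}})
define i32 @sum(i32 %a, i32 %b) nounwind readnone {
entry:
  %c = add i32 %a, %b
  ret i32 %c
}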
Diffstat (limited to 'test/CodeGen/Hexagon')
-rw-r--r--  test/CodeGen/Hexagon/BranchPredict.ll | 6
-rw-r--r--  test/CodeGen/Hexagon/adde.ll | 24
-rw-r--r--  test/CodeGen/Hexagon/addh-sext-trunc.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/addh-shifted.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/addh.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/alu64.ll | 132
-rw-r--r--  test/CodeGen/Hexagon/args.ll | 8
-rw-r--r--  test/CodeGen/Hexagon/bit-eval.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/bit-skip-byval.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/branchfolder-keep-impdef.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/brev_ld.ll | 12
-rw-r--r--  test/CodeGen/Hexagon/brev_st.ll | 10
-rw-r--r--  test/CodeGen/Hexagon/cext-valid-packet1.ll | 4
-rw-r--r--  test/CodeGen/Hexagon/circ_ld.ll | 12
-rw-r--r--  test/CodeGen/Hexagon/circ_ldw.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/circ_st.ll | 10
-rw-r--r--  test/CodeGen/Hexagon/clr_set_toggle.ll | 30
-rw-r--r--  test/CodeGen/Hexagon/cmp.ll | 22
-rw-r--r--  test/CodeGen/Hexagon/combine.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/constp-combine-neg.ll | 6
-rw-r--r--  test/CodeGen/Hexagon/ctlz-cttz-ctpop.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/dead-store-stack.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/eh_return.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/extload-combine.ll | 18
-rw-r--r--  test/CodeGen/Hexagon/extract-basic.ll | 6
-rw-r--r--  test/CodeGen/Hexagon/fadd.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/float-amode.ll | 12
-rw-r--r--  test/CodeGen/Hexagon/fmul.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/fsel.ll | 4
-rw-r--r--  test/CodeGen/Hexagon/fsub.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/fusedandshift.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/hwloop-cleanup.ll | 6
-rw-r--r--  test/CodeGen/Hexagon/hwloop-loop1.ll | 4
-rw-r--r--  test/CodeGen/Hexagon/hwloop1.ll | 16
-rw-r--r--  test/CodeGen/Hexagon/hwloop2.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/hwloop4.ll | 6
-rw-r--r--  test/CodeGen/Hexagon/hwloop5.ll | 4
-rw-r--r--  test/CodeGen/Hexagon/ifcvt-diamond-bug-2016-08-26.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/insert-basic.ll | 8
-rw-r--r--  test/CodeGen/Hexagon/insert4.ll | 4
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/alu32_alu.ll | 38
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/alu32_perm.ll | 24
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/cr.ll | 30
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/system_user.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/xtype_alu.ll | 254
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/xtype_bit.ll | 58
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/xtype_complex.ll | 94
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/xtype_fp.ll | 44
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll | 430
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/xtype_perm.ll | 16
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/xtype_pred.ll | 94
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/xtype_shift.ll | 202
-rw-r--r--  test/CodeGen/Hexagon/newvalueSameReg.ll | 4
-rw-r--r--  test/CodeGen/Hexagon/newvaluejump.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/newvaluejump2.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/opt-addr-mode.ll | 4
-rw-r--r--  test/CodeGen/Hexagon/opt-fabs.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/opt-fneg.ll | 6
-rw-r--r--  test/CodeGen/Hexagon/opt-spill-volatile.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/pic-local.ll | 4
-rw-r--r--  test/CodeGen/Hexagon/pic-simple.ll | 6
-rw-r--r--  test/CodeGen/Hexagon/pic-static.ll | 6
-rw-r--r--  test/CodeGen/Hexagon/predicate-logical.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/predicate-rcmp.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/ret-struct-by-val.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/signed_immediates.ll | 6
-rw-r--r--  test/CodeGen/Hexagon/stack-align1.ll | 6
-rw-r--r--  test/CodeGen/Hexagon/stack-align2.ll | 10
-rw-r--r--  test/CodeGen/Hexagon/stack-alloca1.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/stack-alloca2.ll | 6
-rw-r--r--  test/CodeGen/Hexagon/store-shift.ll | 12
-rw-r--r--  test/CodeGen/Hexagon/sube.ll | 16
-rw-r--r--  test/CodeGen/Hexagon/subi-asl.ll | 6
-rw-r--r--  test/CodeGen/Hexagon/swp-const-tc.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/swp-matmul-bitext.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/swp-max.ll | 4
-rw-r--r--  test/CodeGen/Hexagon/swp-multi-loops.ll | 8
-rw-r--r--  test/CodeGen/Hexagon/swp-stages4.ll | 4
-rw-r--r--  test/CodeGen/Hexagon/swp-stages5.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/swp-vmult.ll | 8
-rw-r--r--  test/CodeGen/Hexagon/swp-vsum.ll | 6
-rw-r--r--  test/CodeGen/Hexagon/tail-dup-subreg-map.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/tfr-to-combine.ll | 6
-rw-r--r--  test/CodeGen/Hexagon/tls_pic.ll | 4
-rw-r--r--  test/CodeGen/Hexagon/two-crash.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/vaddh.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/vect/vect-loadv4i16.ll | 4
-rw-r--r--  test/CodeGen/Hexagon/vect/vect-shift-imm.ll | 12
-rw-r--r--  test/CodeGen/Hexagon/vect/vect-vshifts.ll | 4
-rw-r--r--  test/CodeGen/Hexagon/vect/vect-xor.ll | 2
91 files changed, 934 insertions, 934 deletions
diff --git a/test/CodeGen/Hexagon/BranchPredict.ll b/test/CodeGen/Hexagon/BranchPredict.ll
index 17d169974e5..40791c98148 100644
--- a/test/CodeGen/Hexagon/BranchPredict.ll
+++ b/test/CodeGen/Hexagon/BranchPredict.ll
@@ -9,7 +9,7 @@
@j = external global i32
define i32 @foo(i32 %a) nounwind {
-; CHECK: if{{ *}}(!p{{[0-3]}}.new) jump:nt
+; CHECK: if (!p{{[0-3]}}.new) jump:nt
entry:
%tobool = icmp eq i32 %a, 0
br i1 %tobool, label %if.else, label %if.then, !prof !0
@@ -31,7 +31,7 @@ return: ; preds = %if.else, %if.then
declare i32 @foobar(...)
define i32 @bar(i32 %a) nounwind {
-; CHECK: if{{ *}}(p{{[0-3]}}.new) jump:nt
+; CHECK: if (p{{[0-3]}}.new) jump:nt
entry:
%tobool = icmp eq i32 %a, 0
br i1 %tobool, label %if.else, label %if.then, !prof !1
@@ -51,7 +51,7 @@ return: ; preds = %if.else, %if.then
}
define i32 @foo_bar(i32 %a, i16 signext %b) nounwind {
-; CHECK: if{{ *}}(!cmp.eq(r{{[0-9]*}}.new, #0)) jump:nt
+; CHECK: if (!cmp.eq(r{{[0-9]*}}.new,#0)) jump:nt
entry:
%0 = load i32, i32* @j, align 4
%tobool = icmp eq i32 %0, 0
diff --git a/test/CodeGen/Hexagon/adde.ll b/test/CodeGen/Hexagon/adde.ll
index 43ddb4307ef..67594ad03be 100644
--- a/test/CodeGen/Hexagon/adde.ll
+++ b/test/CodeGen/Hexagon/adde.ll
@@ -1,17 +1,17 @@
; RUN: llc -march=hexagon -disable-hsdr -hexagon-expand-condsets=0 -hexagon-bit=0 -disable-post-ra < %s | FileCheck %s
-; CHECK: r{{[0-9]+:[0-9]+}} = combine(#0, #1)
-; CHECK: r{{[0-9]+:[0-9]+}} = combine(#0, #0)
-; CHECK: r{{[0-9]+:[0-9]+}} = add(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
-; CHECK: p{{[0-9]+}} = cmp.gtu(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
-; CHECK: p{{[0-9]+}} = cmp.gtu(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
-; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+:[0-9]+}} = combine(r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+:[0-9]+}} = combine(r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+:[0-9]+}} = add(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
+; CHECK: r{{[0-9]+:[0-9]+}} = combine(#0,#1)
+; CHECK: r{{[0-9]+:[0-9]+}} = combine(#0,#0)
+; CHECK: r{{[0-9]+:[0-9]+}} = add(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}})
+; CHECK: p{{[0-9]+}} = cmp.gtu(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}})
+; CHECK: p{{[0-9]+}} = cmp.gtu(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}})
+; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}},r{{[0-9]+}},r{{[0-9]+}})
+; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}},r{{[0-9]+}},r{{[0-9]+}})
+; CHECK: r{{[0-9]+:[0-9]+}} = combine(r{{[0-9]+}},r{{[0-9]+}})
+; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}},r{{[0-9]+}},r{{[0-9]+}})
+; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}},r{{[0-9]+}},r{{[0-9]+}})
+; CHECK: r{{[0-9]+:[0-9]+}} = combine(r{{[0-9]+}},r{{[0-9]+}})
+; CHECK: r{{[0-9]+:[0-9]+}} = add(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}})
define void @check_adde_addc (i64 %AL, i64 %AH, i64 %BL, i64 %BH, i64* %RL, i64* %RH) {
diff --git a/test/CodeGen/Hexagon/addh-sext-trunc.ll b/test/CodeGen/Hexagon/addh-sext-trunc.ll
index 7f219944436..ec5dc611105 100644
--- a/test/CodeGen/Hexagon/addh-sext-trunc.ll
+++ b/test/CodeGen/Hexagon/addh-sext-trunc.ll
@@ -1,5 +1,5 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
-; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}}.{{L|l}}, r{{[0-9]+}}.{{H|h}})
+; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}}.{{L|l}},r{{[0-9]+}}.{{H|h}})
target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
target triple = "hexagon-unknown-none"
diff --git a/test/CodeGen/Hexagon/addh-shifted.ll b/test/CodeGen/Hexagon/addh-shifted.ll
index eb263521b42..697a5c5c69b 100644
--- a/test/CodeGen/Hexagon/addh-shifted.ll
+++ b/test/CodeGen/Hexagon/addh-shifted.ll
@@ -1,5 +1,5 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
-; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}}.{{L|l}}, r{{[0-9]+}}.{{L|l}}):<<16
+; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}}.{{L|l}},r{{[0-9]+}}.{{L|l}}):<<16
define i64 @test_cast(i64 %arg0, i16 zeroext %arg1, i16 zeroext %arg2) nounwind readnone {
entry:
diff --git a/test/CodeGen/Hexagon/addh.ll b/test/CodeGen/Hexagon/addh.ll
index c2b536c4669..8217d6753cb 100644
--- a/test/CodeGen/Hexagon/addh.ll
+++ b/test/CodeGen/Hexagon/addh.ll
@@ -1,5 +1,5 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
-; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}}.{{L|l}}, r{{[0-9]+}}.{{L|l}})
+; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}}.{{L|l}},r{{[0-9]+}}.{{L|l}})
define i64 @test_cast(i64 %arg0, i16 zeroext %arg1, i16 zeroext %arg2) nounwind readnone {
entry:
diff --git a/test/CodeGen/Hexagon/alu64.ll b/test/CodeGen/Hexagon/alu64.ll
index f986f135937..453b40a6ee8 100644
--- a/test/CodeGen/Hexagon/alu64.ll
+++ b/test/CodeGen/Hexagon/alu64.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
; CHECK-LABEL: @test00
-; CHECK: = cmp.eq(r1:0, r3:2)
+; CHECK: = cmp.eq(r1:0,r3:2)
define i32 @test00(i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.C2.cmpeqp(i64 %Rs, i64 %Rt)
@@ -9,7 +9,7 @@ entry:
}
; CHECK-LABEL: @test01
-; CHECK: = cmp.gt(r1:0, r3:2)
+; CHECK: = cmp.gt(r1:0,r3:2)
define i32 @test01(i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.C2.cmpgtp(i64 %Rs, i64 %Rt)
@@ -17,7 +17,7 @@ entry:
}
; CHECK-LABEL: @test02
-; CHECK: = cmp.gtu(r1:0, r3:2)
+; CHECK: = cmp.gtu(r1:0,r3:2)
define i32 @test02(i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.C2.cmpgtup(i64 %Rs, i64 %Rt)
@@ -25,7 +25,7 @@ entry:
}
; CHECK-LABEL: @test10
-; CHECK: = cmp.eq(r0, r1)
+; CHECK: = cmp.eq(r0,r1)
define i32 @test10(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.rcmpeq(i32 %Rs, i32 %Rt)
@@ -33,7 +33,7 @@ entry:
}
; CHECK-LABEL: @test11
-; CHECK: = !cmp.eq(r0, r1)
+; CHECK: = !cmp.eq(r0,r1)
define i32 @test11(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.rcmpneq(i32 %Rs, i32 %Rt)
@@ -41,7 +41,7 @@ entry:
}
; CHECK-LABEL: @test12
-; CHECK: = cmp.eq(r0, #23)
+; CHECK: = cmp.eq(r0,#23)
define i32 @test12(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.rcmpeqi(i32 %Rs, i32 23)
@@ -49,7 +49,7 @@ entry:
}
; CHECK-LABEL: @test13
-; CHECK: = !cmp.eq(r0, #47)
+; CHECK: = !cmp.eq(r0,#47)
define i32 @test13(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.rcmpneqi(i32 %Rs, i32 47)
@@ -57,7 +57,7 @@ entry:
}
; CHECK-LABEL: @test20
-; CHECK: = cmpb.eq(r0, r1)
+; CHECK: = cmpb.eq(r0,r1)
define i32 @test20(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.cmpbeq(i32 %Rs, i32 %Rt)
@@ -65,7 +65,7 @@ entry:
}
; CHECK-LABEL: @test21
-; CHECK: = cmpb.gt(r0, r1)
+; CHECK: = cmpb.gt(r0,r1)
define i32 @test21(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.cmpbgt(i32 %Rs, i32 %Rt)
@@ -73,7 +73,7 @@ entry:
}
; CHECK-LABEL: @test22
-; CHECK: = cmpb.gtu(r0, r1)
+; CHECK: = cmpb.gtu(r0,r1)
define i32 @test22(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.cmpbgtu(i32 %Rs, i32 %Rt)
@@ -81,7 +81,7 @@ entry:
}
; CHECK-LABEL: @test23
-; CHECK: = cmpb.eq(r0, #56)
+; CHECK: = cmpb.eq(r0,#56)
define i32 @test23(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.cmpbeqi(i32 %Rs, i32 56)
@@ -89,7 +89,7 @@ entry:
}
; CHECK-LABEL: @test24
-; CHECK: = cmpb.gt(r0, #29)
+; CHECK: = cmpb.gt(r0,#29)
define i32 @test24(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.cmpbgti(i32 %Rs, i32 29)
@@ -97,7 +97,7 @@ entry:
}
; CHECK-LABEL: @test25
-; CHECK: = cmpb.gtu(r0, #111)
+; CHECK: = cmpb.gtu(r0,#111)
define i32 @test25(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.cmpbgtui(i32 %Rs, i32 111)
@@ -105,7 +105,7 @@ entry:
}
; CHECK-LABEL: @test30
-; CHECK: = cmph.eq(r0, r1)
+; CHECK: = cmph.eq(r0,r1)
define i32 @test30(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.cmpheq(i32 %Rs, i32 %Rt)
@@ -113,7 +113,7 @@ entry:
}
; CHECK-LABEL: @test31
-; CHECK: = cmph.gt(r0, r1)
+; CHECK: = cmph.gt(r0,r1)
define i32 @test31(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.cmphgt(i32 %Rs, i32 %Rt)
@@ -121,7 +121,7 @@ entry:
}
; CHECK-LABEL: @test32
-; CHECK: = cmph.gtu(r0, r1)
+; CHECK: = cmph.gtu(r0,r1)
define i32 @test32(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.cmphgtu(i32 %Rs, i32 %Rt)
@@ -129,7 +129,7 @@ entry:
}
; CHECK-LABEL: @test33
-; CHECK: = cmph.eq(r0, #-123)
+; CHECK: = cmph.eq(r0,#-123)
define i32 @test33(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.cmpheqi(i32 %Rs, i32 -123)
@@ -137,7 +137,7 @@ entry:
}
; CHECK-LABEL: @test34
-; CHECK: = cmph.gt(r0, #-3)
+; CHECK: = cmph.gt(r0,#-3)
define i32 @test34(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.cmphgti(i32 %Rs, i32 -3)
@@ -145,7 +145,7 @@ entry:
}
; CHECK-LABEL: @test35
-; CHECK: = cmph.gtu(r0, #13)
+; CHECK: = cmph.gtu(r0,#13)
define i32 @test35(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.cmphgtui(i32 %Rs, i32 13)
@@ -153,7 +153,7 @@ entry:
}
; CHECK-LABEL: @test40
-; CHECK: = vmux(p0, r3:2, r5:4)
+; CHECK: = vmux(p0,r3:2,r5:4)
define i64 @test40(i32 %Pu, i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i64 @llvm.hexagon.C2.vmux(i32 %Pu, i64 %Rs, i64 %Rt)
@@ -161,7 +161,7 @@ entry:
}
; CHECK-LABEL: @test41
-; CHECK: = any8(vcmpb.eq(r1:0, r3:2))
+; CHECK: = any8(vcmpb.eq(r1:0,r3:2))
define i32 @test41(i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.vcmpbeq.any(i64 %Rs, i64 %Rt)
@@ -169,7 +169,7 @@ entry:
}
; CHECK-LABEL: @test50
-; CHECK: = add(r1:0, r3:2)
+; CHECK: = add(r1:0,r3:2)
define i64 @test50(i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i64 @llvm.hexagon.A2.addp(i64 %Rs, i64 %Rt)
@@ -177,7 +177,7 @@ entry:
}
; CHECK-LABEL: @test51
-; CHECK: = add(r1:0, r3:2):sat
+; CHECK: = add(r1:0,r3:2):sat
define i64 @test51(i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i64 @llvm.hexagon.A2.addpsat(i64 %Rs, i64 %Rt)
@@ -185,7 +185,7 @@ entry:
}
; CHECK-LABEL: @test52
-; CHECK: = sub(r1:0, r3:2)
+; CHECK: = sub(r1:0,r3:2)
define i64 @test52(i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i64 @llvm.hexagon.A2.subp(i64 %Rs, i64 %Rt)
@@ -193,7 +193,7 @@ entry:
}
; CHECK-LABEL: @test53
-; CHECK: = add(r1:0, r3:2):raw:
+; CHECK: = add(r1:0,r3:2):raw:
define i64 @test53(i32 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i64 @llvm.hexagon.A2.addsp(i32 %Rs, i64 %Rt)
@@ -201,7 +201,7 @@ entry:
}
; CHECK-LABEL: @test54
-; CHECK: = and(r1:0, r3:2)
+; CHECK: = and(r1:0,r3:2)
define i64 @test54(i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i64 @llvm.hexagon.A2.andp(i64 %Rs, i64 %Rt)
@@ -209,7 +209,7 @@ entry:
}
; CHECK-LABEL: @test55
-; CHECK: = or(r1:0, r3:2)
+; CHECK: = or(r1:0,r3:2)
define i64 @test55(i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i64 @llvm.hexagon.A2.orp(i64 %Rs, i64 %Rt)
@@ -217,7 +217,7 @@ entry:
}
; CHECK-LABEL: @test56
-; CHECK: = xor(r1:0, r3:2)
+; CHECK: = xor(r1:0,r3:2)
define i64 @test56(i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i64 @llvm.hexagon.A2.xorp(i64 %Rs, i64 %Rt)
@@ -225,7 +225,7 @@ entry:
}
; CHECK-LABEL: @test57
-; CHECK: = and(r1:0, ~r3:2)
+; CHECK: = and(r1:0,~r3:2)
define i64 @test57(i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i64 @llvm.hexagon.A4.andnp(i64 %Rs, i64 %Rt)
@@ -233,7 +233,7 @@ entry:
}
; CHECK-LABEL: @test58
-; CHECK: = or(r1:0, ~r3:2)
+; CHECK: = or(r1:0,~r3:2)
define i64 @test58(i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i64 @llvm.hexagon.A4.ornp(i64 %Rs, i64 %Rt)
@@ -241,7 +241,7 @@ entry:
}
; CHECK-LABEL: @test60
-; CHECK: = add(r0.l, r1.l)
+; CHECK: = add(r0.l,r1.l)
define i32 @test60(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.addh.l16.ll(i32 %Rs, i32 %Rt)
@@ -249,7 +249,7 @@ entry:
}
; CHECK-LABEL: @test61
-; CHECK: = add(r0.l, r1.h)
+; CHECK: = add(r0.l,r1.h)
define i32 @test61(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.addh.l16.hl(i32 %Rs, i32 %Rt)
@@ -257,7 +257,7 @@ entry:
}
; CHECK-LABEL: @test62
-; CHECK: = add(r0.l, r1.l):sat
+; CHECK: = add(r0.l,r1.l):sat
define i32 @test62(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32 %Rs, i32 %Rt)
@@ -265,7 +265,7 @@ entry:
}
; CHECK-LABEL: @test63
-; CHECK: = add(r0.l, r1.h):sat
+; CHECK: = add(r0.l,r1.h):sat
define i32 @test63(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.addh.l16.sat.hl(i32 %Rs, i32 %Rt)
@@ -273,7 +273,7 @@ entry:
}
; CHECK-LABEL: @test64
-; CHECK: = add(r0.l, r1.l):<<16
+; CHECK: = add(r0.l,r1.l):<<16
define i32 @test64(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.addh.h16.ll(i32 %Rs, i32 %Rt)
@@ -281,7 +281,7 @@ entry:
}
; CHECK-LABEL: @test65
-; CHECK: = add(r0.l, r1.h):<<16
+; CHECK: = add(r0.l,r1.h):<<16
define i32 @test65(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.addh.h16.lh(i32 %Rs, i32 %Rt)
@@ -289,7 +289,7 @@ entry:
}
; CHECK-LABEL: @test66
-; CHECK: = add(r0.h, r1.l):<<16
+; CHECK: = add(r0.h,r1.l):<<16
define i32 @test66(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.addh.h16.hl(i32 %Rs, i32 %Rt)
@@ -297,7 +297,7 @@ entry:
}
; CHECK-LABEL: @test67
-; CHECK: = add(r0.h, r1.h):<<16
+; CHECK: = add(r0.h,r1.h):<<16
define i32 @test67(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.addh.h16.hh(i32 %Rs, i32 %Rt)
@@ -305,7 +305,7 @@ entry:
}
; CHECK-LABEL: @test68
-; CHECK: = add(r0.l, r1.l):sat:<<16
+; CHECK: = add(r0.l,r1.l):sat:<<16
define i32 @test68(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.addh.h16.sat.ll(i32 %Rs, i32 %Rt)
@@ -313,7 +313,7 @@ entry:
}
; CHECK-LABEL: @test69
-; CHECK: = add(r0.l, r1.h):sat:<<16
+; CHECK: = add(r0.l,r1.h):sat:<<16
define i32 @test69(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.addh.h16.sat.lh(i32 %Rs, i32 %Rt)
@@ -321,7 +321,7 @@ entry:
}
; CHECK-LABEL: @test6A
-; CHECK: = add(r0.h, r1.l):sat:<<16
+; CHECK: = add(r0.h,r1.l):sat:<<16
define i32 @test6A(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.addh.h16.sat.hl(i32 %Rs, i32 %Rt)
@@ -329,7 +329,7 @@ entry:
}
; CHECK-LABEL: @test6B
-; CHECK: = add(r0.h, r1.h):sat:<<16
+; CHECK: = add(r0.h,r1.h):sat:<<16
define i32 @test6B(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.addh.h16.sat.hh(i32 %Rs, i32 %Rt)
@@ -337,7 +337,7 @@ entry:
}
; CHECK-LABEL: @test70
-; CHECK: = sub(r0.l, r1.l)
+; CHECK: = sub(r0.l,r1.l)
define i32 @test70(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.subh.l16.ll(i32 %Rs, i32 %Rt)
@@ -345,7 +345,7 @@ entry:
}
; CHECK-LABEL: @test71
-; CHECK: = sub(r0.l, r1.h)
+; CHECK: = sub(r0.l,r1.h)
define i32 @test71(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.subh.l16.hl(i32 %Rs, i32 %Rt)
@@ -353,7 +353,7 @@ entry:
}
; CHECK-LABEL: @test72
-; CHECK: = sub(r0.l, r1.l):sat
+; CHECK: = sub(r0.l,r1.l):sat
define i32 @test72(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %Rs, i32 %Rt)
@@ -361,7 +361,7 @@ entry:
}
; CHECK-LABEL: @test73
-; CHECK: = sub(r0.l, r1.h):sat
+; CHECK: = sub(r0.l,r1.h):sat
define i32 @test73(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.subh.l16.sat.hl(i32 %Rs, i32 %Rt)
@@ -369,7 +369,7 @@ entry:
}
; CHECK-LABEL: @test74
-; CHECK: = sub(r0.l, r1.l):<<16
+; CHECK: = sub(r0.l,r1.l):<<16
define i32 @test74(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.subh.h16.ll(i32 %Rs, i32 %Rt)
@@ -377,7 +377,7 @@ entry:
}
; CHECK-LABEL: @test75
-; CHECK: = sub(r0.l, r1.h):<<16
+; CHECK: = sub(r0.l,r1.h):<<16
define i32 @test75(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.subh.h16.lh(i32 %Rs, i32 %Rt)
@@ -385,7 +385,7 @@ entry:
}
; CHECK-LABEL: @test76
-; CHECK: = sub(r0.h, r1.l):<<16
+; CHECK: = sub(r0.h,r1.l):<<16
define i32 @test76(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.subh.h16.hl(i32 %Rs, i32 %Rt)
@@ -393,7 +393,7 @@ entry:
}
; CHECK-LABEL: @test77
-; CHECK: = sub(r0.h, r1.h):<<16
+; CHECK: = sub(r0.h,r1.h):<<16
define i32 @test77(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.subh.h16.hh(i32 %Rs, i32 %Rt)
@@ -401,7 +401,7 @@ entry:
}
; CHECK-LABEL: @test78
-; CHECK: = sub(r0.l, r1.l):sat:<<16
+; CHECK: = sub(r0.l,r1.l):sat:<<16
define i32 @test78(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.subh.h16.sat.ll(i32 %Rs, i32 %Rt)
@@ -409,7 +409,7 @@ entry:
}
; CHECK-LABEL: @test79
-; CHECK: = sub(r0.l, r1.h):sat:<<16
+; CHECK: = sub(r0.l,r1.h):sat:<<16
define i32 @test79(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.subh.h16.sat.lh(i32 %Rs, i32 %Rt)
@@ -417,7 +417,7 @@ entry:
}
; CHECK-LABEL: @test7A
-; CHECK: = sub(r0.h, r1.l):sat:<<16
+; CHECK: = sub(r0.h,r1.l):sat:<<16
define i32 @test7A(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.subh.h16.sat.hl(i32 %Rs, i32 %Rt)
@@ -425,7 +425,7 @@ entry:
}
; CHECK-LABEL: @test7B
-; CHECK: = sub(r0.h, r1.h):sat:<<16
+; CHECK: = sub(r0.h,r1.h):sat:<<16
define i32 @test7B(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.subh.h16.sat.hh(i32 %Rs, i32 %Rt)
@@ -433,7 +433,7 @@ entry:
}
; CHECK-LABEL: @test90
-; CHECK: = and(#1, asl(r0, #2))
+; CHECK: = and(#1,asl(r0,#2))
define i32 @test90(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.S4.andi.asl.ri(i32 1, i32 %Rs, i32 2)
@@ -441,7 +441,7 @@ entry:
}
; CHECK-LABEL: @test91
-; CHECK: = or(#1, asl(r0, #2))
+; CHECK: = or(#1,asl(r0,#2))
define i32 @test91(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.S4.ori.asl.ri(i32 1, i32 %Rs, i32 2)
@@ -449,7 +449,7 @@ entry:
}
; CHECK-LABEL: @test92
-; CHECK: = add(#1, asl(r0, #2))
+; CHECK: = add(#1,asl(r0,#2))
define i32 @test92(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.S4.addi.asl.ri(i32 1, i32 %Rs, i32 2)
@@ -457,7 +457,7 @@ entry:
}
; CHECK-LABEL: @test93
-; CHECK: = sub(#1, asl(r0, #2))
+; CHECK: = sub(#1,asl(r0,#2))
define i32 @test93(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.S4.subi.asl.ri(i32 1, i32 %Rs, i32 2)
@@ -465,7 +465,7 @@ entry:
}
; CHECK-LABEL: @test94
-; CHECK: = and(#1, lsr(r0, #2))
+; CHECK: = and(#1,lsr(r0,#2))
define i32 @test94(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.S4.andi.lsr.ri(i32 1, i32 %Rs, i32 2)
@@ -473,7 +473,7 @@ entry:
}
; CHECK-LABEL: @test95
-; CHECK: = or(#1, lsr(r0, #2))
+; CHECK: = or(#1,lsr(r0,#2))
define i32 @test95(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.S4.ori.lsr.ri(i32 1, i32 %Rs, i32 2)
@@ -481,7 +481,7 @@ entry:
}
; CHECK-LABEL: @test96
-; CHECK: = add(#1, lsr(r0, #2))
+; CHECK: = add(#1,lsr(r0,#2))
define i32 @test96(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.S4.addi.lsr.ri(i32 1, i32 %Rs, i32 2)
@@ -489,7 +489,7 @@ entry:
}
; CHECK-LABEL: @test97
-; CHECK: = sub(#1, lsr(r0, #2))
+; CHECK: = sub(#1,lsr(r0,#2))
define i32 @test97(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.S4.subi.lsr.ri(i32 1, i32 %Rs, i32 2)
@@ -497,7 +497,7 @@ entry:
}
; CHECK-LABEL: @test100
-; CHECK: = bitsplit(r0, r1)
+; CHECK: = bitsplit(r0,r1)
define i64 @test100(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i64 @llvm.hexagon.A4.bitsplit(i32 %Rs, i32 %Rt)
@@ -505,7 +505,7 @@ entry:
}
; CHECK-LABEL: @test101
-; CHECK: = modwrap(r0, r1)
+; CHECK: = modwrap(r0,r1)
define i32 @test101(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.modwrapu(i32 %Rs, i32 %Rt)
@@ -513,7 +513,7 @@ entry:
}
; CHECK-LABEL: @test102
-; CHECK: = parity(r1:0, r3:2)
+; CHECK: = parity(r1:0,r3:2)
define i32 @test102(i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.S2.parityp(i64 %Rs, i64 %Rt)
@@ -521,7 +521,7 @@ entry:
}
; CHECK-LABEL: @test103
-; CHECK: = parity(r0, r1)
+; CHECK: = parity(r0,r1)
define i32 @test103(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.S4.parity(i32 %Rs, i32 %Rt)
diff --git a/test/CodeGen/Hexagon/args.ll b/test/CodeGen/Hexagon/args.ll
index 3bfb8b15955..a1c7bc3230d 100644
--- a/test/CodeGen/Hexagon/args.ll
+++ b/test/CodeGen/Hexagon/args.ll
@@ -1,8 +1,8 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
-; CHECK: r5:4 = combine(#6, #5)
-; CHECK: r3:2 = combine(#4, #3)
-; CHECK: r1:0 = combine(#2, #1)
-; CHECK: memw(r29+#0)=#7
+; CHECK: r5:4 = combine(#6,#5)
+; CHECK: r3:2 = combine(#4,#3)
+; CHECK: r1:0 = combine(#2,#1)
+; CHECK: memw(r29+#0) = #7
define void @foo() nounwind {
diff --git a/test/CodeGen/Hexagon/bit-eval.ll b/test/CodeGen/Hexagon/bit-eval.ll
index 1d2be5bfc19..5b0111dfcd1 100644
--- a/test/CodeGen/Hexagon/bit-eval.ll
+++ b/test/CodeGen/Hexagon/bit-eval.ll
@@ -20,7 +20,7 @@ entry:
}
; CHECK-LABEL: test3:
-; CHECK: r1:0 = combine(#0, #1)
+; CHECK: r1:0 = combine(#0,#1)
define i64 @test3() #0 {
entry:
%0 = tail call i64 @llvm.hexagon.S4.extractp(i64 -1, i32 63, i32 63)
diff --git a/test/CodeGen/Hexagon/bit-skip-byval.ll b/test/CodeGen/Hexagon/bit-skip-byval.ll
index d6c1aad9400..9ee4014ae34 100644
--- a/test/CodeGen/Hexagon/bit-skip-byval.ll
+++ b/test/CodeGen/Hexagon/bit-skip-byval.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
;
; Either and or zxtb.
-; CHECK: r0 = and(r1, #255)
+; CHECK: r0 = and(r1,#255)
%struct.t0 = type { i32 }
diff --git a/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll b/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll
index a56680bd439..e09f7986621 100644
--- a/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll
+++ b/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll
@@ -3,7 +3,7 @@
; Check that the testcase compiles successfully. Expect that if-conversion
; took place.
; CHECK-LABEL: fred:
-; CHECK: if (!p0) r1 = memw(r0 + #0)
+; CHECK: if (!p0) r1 = memw(r0+#0)
target triple = "hexagon"
diff --git a/test/CodeGen/Hexagon/brev_ld.ll b/test/CodeGen/Hexagon/brev_ld.ll
index a2914296ec4..861da32b981 100644
--- a/test/CodeGen/Hexagon/brev_ld.ll
+++ b/test/CodeGen/Hexagon/brev_ld.ll
@@ -29,7 +29,7 @@ entry:
%1 = bitcast i64* %inputLR to i8*
%sub = sub i32 13, %shr1
%shl = shl i32 1, %sub
-; CHECK: = memd(r{{[0-9]*}} ++ m{{[0-1]}}:brev)
+; CHECK: = memd(r{{[0-9]*}}++m{{[0-1]}}:brev)
%2 = call i8* @llvm.hexagon.brev.ldd(i8* %0, i8* %1, i32 %shl)
%3 = bitcast i8* %1 to i64*
%4 = load i64, i64* %3, align 8, !tbaa !0
@@ -49,7 +49,7 @@ entry:
%1 = bitcast i32* %inputLR to i8*
%sub = sub i32 14, %shr1
%shl = shl i32 1, %sub
-; CHECK: = memw(r{{[0-9]*}} ++ m{{[0-1]}}:brev)
+; CHECK: = memw(r{{[0-9]*}}++m{{[0-1]}}:brev)
%2 = call i8* @llvm.hexagon.brev.ldw(i8* %0, i8* %1, i32 %shl)
%3 = bitcast i8* %1 to i32*
%4 = load i32, i32* %3, align 4, !tbaa !2
@@ -69,7 +69,7 @@ entry:
%1 = bitcast i16* %inputLR to i8*
%sub = sub i32 15, %shr1
%shl = shl i32 1, %sub
-; CHECK: = memh(r{{[0-9]*}} ++ m0:brev)
+; CHECK: = memh(r{{[0-9]*}}++m0:brev)
%2 = call i8* @llvm.hexagon.brev.ldh(i8* %0, i8* %1, i32 %shl)
%3 = bitcast i8* %1 to i16*
%4 = load i16, i16* %3, align 2, !tbaa !3
@@ -89,7 +89,7 @@ entry:
%1 = bitcast i16* %inputLR to i8*
%sub = sub i32 15, %shr1
%shl = shl i32 1, %sub
-; CHECK: = memuh(r{{[0-9]*}} ++ m0:brev)
+; CHECK: = memuh(r{{[0-9]*}}++m0:brev)
%2 = call i8* @llvm.hexagon.brev.lduh(i8* %0, i8* %1, i32 %shl)
%3 = bitcast i8* %1 to i16*
%4 = load i16, i16* %3, align 2, !tbaa !3
@@ -108,7 +108,7 @@ entry:
%0 = bitcast i16* %arrayidx to i8*
%sub = sub nsw i32 16, %shr1
%shl = shl i32 1, %sub
-; CHECK: = memub(r{{[0-9]*}} ++ m{{[0-1]}}:brev)
+; CHECK: = memub(r{{[0-9]*}}++m{{[0-1]}}:brev)
%1 = call i8* @llvm.hexagon.brev.ldub(i8* %0, i8* %inputLR, i32 %shl)
%2 = load i8, i8* %inputLR, align 1, !tbaa !0
ret i8 %2
@@ -126,7 +126,7 @@ entry:
%0 = bitcast i16* %arrayidx to i8*
%sub = sub nsw i32 16, %shr1
%shl = shl i32 1, %sub
-; CHECK: = memb(r{{[0-9]*}} ++ m{{[0-1]}}:brev)
+; CHECK: = memb(r{{[0-9]*}}++m{{[0-1]}}:brev)
%1 = call i8* @llvm.hexagon.brev.ldb(i8* %0, i8* %inputLR, i32 %shl)
%2 = load i8, i8* %inputLR, align 1, !tbaa !0
ret i8 %2
diff --git a/test/CodeGen/Hexagon/brev_st.ll b/test/CodeGen/Hexagon/brev_st.ll
index 6c55681a683..cee5f52e3e4 100644
--- a/test/CodeGen/Hexagon/brev_st.ll
+++ b/test/CodeGen/Hexagon/brev_st.ll
@@ -26,7 +26,7 @@ entry:
%0 = bitcast i16* %arrayidx to i8*
%sub = sub i32 13, %shr2
%shl = shl i32 1, %sub
-; CHECK: memd(r{{[0-9]*}} ++ m{{[0-1]}}:brev)
+; CHECK: memd(r{{[0-9]*}}++m{{[0-1]}}:brev)
%1 = tail call i8* @llvm.hexagon.brev.std(i8* %0, i64 undef, i32 %shl)
ret i64 0
}
@@ -42,7 +42,7 @@ entry:
%0 = bitcast i16* %arrayidx to i8*
%sub = sub i32 14, %shr1
%shl = shl i32 1, %sub
-; CHECK: memw(r{{[0-9]*}} ++ m{{[0-1]}}:brev)
+; CHECK: memw(r{{[0-9]*}}++m{{[0-1]}}:brev)
%1 = tail call i8* @llvm.hexagon.brev.stw(i8* %0, i32 undef, i32 %shl)
ret i32 0
}
@@ -58,7 +58,7 @@ entry:
%0 = bitcast i16* %arrayidx to i8*
%sub = sub i32 15, %shr2
%shl = shl i32 1, %sub
-; CHECK: memh(r{{[0-9]*}} ++ m{{[0-1]}}:brev)
+; CHECK: memh(r{{[0-9]*}}++m{{[0-1]}}:brev)
%1 = tail call i8* @llvm.hexagon.brev.sth(i8* %0, i32 0, i32 %shl)
ret i16 0
}
@@ -74,7 +74,7 @@ entry:
%0 = bitcast i16* %arrayidx to i8*
%sub = sub i32 15, %shr2
%shl = shl i32 1, %sub
-; CHECK: memh(r{{[0-9]*}} ++ m{{[0-1]}}:brev){{ *}}={{ *}}r{{[0-9]*}}.h
+; CHECK: memh(r{{[0-9]*}}++m{{[0-1]}}:brev) = r{{[0-9]*}}.h
%1 = tail call i8* @llvm.hexagon.brev.sthhi(i8* %0, i32 0, i32 %shl)
ret i16 0
}
@@ -89,7 +89,7 @@ entry:
%arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
%0 = bitcast i16* %arrayidx to i8*
%sub = sub nsw i32 16, %shr2
- ; CHECK: memb(r{{[0-9]*}} ++ m{{[0-1]}}:brev)
+ ; CHECK: memb(r{{[0-9]*}}++m{{[0-1]}}:brev)
%shl = shl i32 1, %sub
%1 = tail call i8* @llvm.hexagon.brev.stb(i8* %0, i32 0, i32 %shl)
ret i8 0
diff --git a/test/CodeGen/Hexagon/cext-valid-packet1.ll b/test/CodeGen/Hexagon/cext-valid-packet1.ll
index 36abc59f5e3..b0aa3c16f86 100644
--- a/test/CodeGen/Hexagon/cext-valid-packet1.ll
+++ b/test/CodeGen/Hexagon/cext-valid-packet1.ll
@@ -3,8 +3,8 @@
; Check that the packetizer generates valid packets with constant
; extended instructions.
; CHECK: {
-; CHECK-NEXT: r{{[0-9]+}}{{ *}}={{ *}}add(r{{[0-9]+}}, ##{{[0-9]+}})
-; CHECK-NEXT: r{{[0-9]+}}{{ *}}={{ *}}add(r{{[0-9]+}}, ##{{[0-9]+}})
+; CHECK-NEXT: r{{[0-9]+}} = add(r{{[0-9]+}},##{{[0-9]+}})
+; CHECK-NEXT: r{{[0-9]+}} = add(r{{[0-9]+}},##{{[0-9]+}})
; CHECK-NEXT: }
define i32 @check-packet1(i32 %a, i32 %b, i32 %c) nounwind readnone {
diff --git a/test/CodeGen/Hexagon/circ_ld.ll b/test/CodeGen/Hexagon/circ_ld.ll
index ffa5f2cd222..a9b367e9c4e 100644
--- a/test/CodeGen/Hexagon/circ_ld.ll
+++ b/test/CodeGen/Hexagon/circ_ld.ll
@@ -26,7 +26,7 @@ entry:
%arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
%0 = bitcast i16* %arrayidx to i8*
%or = or i32 %shr1, 33554432
-; CHECK: = memb(r{{[0-9]*.}}++{{.}}#-1:circ(m{{[0-1]}}))
+; CHECK: = memb(r{{[0-9]*}}++#-1:circ(m{{[0-1]}}))
%1 = call i8* @llvm.hexagon.circ.ldb(i8* %0, i8* %inputLR, i32 %or, i32 -1)
%2 = load i8, i8* %inputLR, align 1, !tbaa !0
ret i8 %2
@@ -45,7 +45,7 @@ entry:
%1 = bitcast i64* %inputLR to i8*
%shl = shl nuw nsw i32 %shr1, 3
%or = or i32 %shl, 83886080
-; CHECK: = memd(r{{[0-9]*.}}++{{.}}#-8:circ(m{{[0-1]}}))
+; CHECK: = memd(r{{[0-9]*}}++#-8:circ(m{{[0-1]}}))
%2 = call i8* @llvm.hexagon.circ.ldd(i8* %0, i8* %1, i32 %or, i32 -8)
%3 = bitcast i8* %1 to i64*
%4 = load i64, i64* %3, align 8, !tbaa !0
@@ -64,7 +64,7 @@ entry:
%0 = bitcast i16* %arrayidx to i8*
%1 = bitcast i16* %inputLR to i8*
%or = or i32 %shr1, 50331648
-; CHECK: = memh(r{{[0-9]*.}}++{{.}}#-2:circ(m{{[0-1]}}))
+; CHECK: = memh(r{{[0-9]*}}++#-2:circ(m{{[0-1]}}))
%2 = call i8* @llvm.hexagon.circ.ldh(i8* %0, i8* %1, i32 %or, i32 -2)
%3 = bitcast i8* %1 to i16*
%4 = load i16, i16* %3, align 2, !tbaa !2
@@ -82,7 +82,7 @@ entry:
%arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
%0 = bitcast i16* %arrayidx to i8*
%or = or i32 %shr1, 33554432
-; CHECK: = memub(r{{[0-9]*.}}++{{.}}#-1:circ(m{{[0-1]}}))
+; CHECK: = memub(r{{[0-9]*}}++#-1:circ(m{{[0-1]}}))
%1 = call i8* @llvm.hexagon.circ.ldub(i8* %0, i8* %inputLR, i32 %or, i32 -1)
%2 = load i8, i8* %inputLR, align 1, !tbaa !0
ret i8 %2
@@ -100,7 +100,7 @@ entry:
%0 = bitcast i16* %arrayidx to i8*
%1 = bitcast i16* %inputLR to i8*
%or = or i32 %shr1, 50331648
-; CHECK: = memuh(r{{[0-9]*.}}++{{.}}#-2:circ(m{{[0-1]}}))
+; CHECK: = memuh(r{{[0-9]*}}++#-2:circ(m{{[0-1]}}))
%2 = call i8* @llvm.hexagon.circ.lduh(i8* %0, i8* %1, i32 %or, i32 -2)
%3 = bitcast i8* %1 to i16*
%4 = load i16, i16* %3, align 2, !tbaa !2
@@ -120,7 +120,7 @@ entry:
%1 = bitcast i32* %inputLR to i8*
%shl = shl nuw nsw i32 %shr1, 2
%or = or i32 %shl, 67108864
-; CHECK: = memw(r{{[0-9]*.}}++{{.}}#-4:circ(m{{[0-1]}}))
+; CHECK: = memw(r{{[0-9]*}}++#-4:circ(m{{[0-1]}}))
%2 = call i8* @llvm.hexagon.circ.ldw(i8* %0, i8* %1, i32 %or, i32 -4)
%3 = bitcast i8* %1 to i32*
%4 = load i32, i32* %3, align 4, !tbaa !3
diff --git a/test/CodeGen/Hexagon/circ_ldw.ll b/test/CodeGen/Hexagon/circ_ldw.ll
index 4511a9cf69d..abfb0886c68 100644
--- a/test/CodeGen/Hexagon/circ_ldw.ll
+++ b/test/CodeGen/Hexagon/circ_ldw.ll
@@ -1,5 +1,5 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
-; CHECK: r{{[0-9]*}} = memw(r{{[0-9]*.}}++{{.}}#-4:circ(m0))
+; CHECK: r{{[0-9]*}} = memw(r{{[0-9]*}}++#-4:circ(m0))
%union.vect64 = type { i64 }
diff --git a/test/CodeGen/Hexagon/circ_st.ll b/test/CodeGen/Hexagon/circ_st.ll
index 4b54afbc611..c8fa256ad48 100644
--- a/test/CodeGen/Hexagon/circ_st.ll
+++ b/test/CodeGen/Hexagon/circ_st.ll
@@ -23,7 +23,7 @@ entry:
%arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
%0 = bitcast i16* %arrayidx to i8*
%or = or i32 %shr2, 33554432
-; CHECK: memb(r{{[0-9]*}}{{.}}++{{.}}#-1:circ(m{{[0-1]}}))
+; CHECK: memb(r{{[0-9]*}}++#-1:circ(m{{[0-1]}}))
%1 = tail call i8* @llvm.hexagon.circ.stb(i8* %0, i32 0, i32 %or, i32 -1)
ret i8 0
}
@@ -39,7 +39,7 @@ entry:
%0 = bitcast i16* %arrayidx to i8*
%shl = shl nuw nsw i32 %shr1, 3
%or = or i32 %shl, 83886080
-; CHECK: memd(r{{[0-9]*}}{{.}}++{{.}}#-8:circ(m{{[0-1]}}))
+; CHECK: memd(r{{[0-9]*}}++#-8:circ(m{{[0-1]}}))
%1 = tail call i8* @llvm.hexagon.circ.std(i8* %0, i64 undef, i32 %or, i32 -8)
ret i64 0
}
@@ -54,7 +54,7 @@ entry:
%arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
%0 = bitcast i16* %arrayidx to i8*
%or = or i32 %shr2, 50331648
-; CHECK: memh(r{{[0-9]*}}{{.}}++{{.}}#-2:circ(m{{[0-1]}}))
+; CHECK: memh(r{{[0-9]*}}++#-2:circ(m{{[0-1]}}))
%1 = tail call i8* @llvm.hexagon.circ.sth(i8* %0, i32 0, i32 %or, i32 -2)
ret i16 0
}
@@ -69,7 +69,7 @@ entry:
%arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
%0 = bitcast i16* %arrayidx to i8*
%or = or i32 %shr2, 50331648
-; CHECK: memh(r{{[0-9]*}}{{.}}++{{.}}#-2:circ(m{{[0-1]}})){{ *}}={{ *}}r{{[0-9]*}}.h
+; CHECK: memh(r{{[0-9]*}}++#-2:circ(m{{[0-1]}})) = r{{[0-9]*}}.h
%1 = tail call i8* @llvm.hexagon.circ.sthhi(i8* %0, i32 0, i32 %or, i32 -2)
ret i16 0
}
@@ -85,7 +85,7 @@ entry:
%0 = bitcast i16* %arrayidx to i8*
%shl = shl nuw nsw i32 %shr1, 2
%or = or i32 %shl, 67108864
-; CHECK: memw(r{{[0-9]*}}{{.}}++{{.}}#-4:circ(m{{[0-1]}}))
+; CHECK: memw(r{{[0-9]*}}++#-4:circ(m{{[0-1]}}))
%1 = tail call i8* @llvm.hexagon.circ.stw(i8* %0, i32 undef, i32 %or, i32 -4)
ret i32 0
}
diff --git a/test/CodeGen/Hexagon/clr_set_toggle.ll b/test/CodeGen/Hexagon/clr_set_toggle.ll
index 19e3ed0cf89..4e983831652 100644
--- a/test/CodeGen/Hexagon/clr_set_toggle.ll
+++ b/test/CodeGen/Hexagon/clr_set_toggle.ll
@@ -4,7 +4,7 @@
define i32 @my_clrbit(i32 %x) nounwind {
entry:
; CHECK-LABEL: my_clrbit
-; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #31)
+; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#31)
%x.addr = alloca i32, align 4
store i32 %x, i32* %x.addr, align 4
%0 = load i32, i32* %x.addr, align 4
@@ -15,7 +15,7 @@ entry:
define i64 @my_clrbit2(i64 %x) nounwind {
entry:
; CHECK-LABEL: my_clrbit2
-; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #31)
+; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#31)
%x.addr = alloca i64, align 8
store i64 %x, i64* %x.addr, align 8
%0 = load i64, i64* %x.addr, align 8
@@ -26,7 +26,7 @@ entry:
define i64 @my_clrbit3(i64 %x) nounwind {
entry:
; CHECK-LABEL: my_clrbit3
-; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #31)
+; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#31)
%x.addr = alloca i64, align 8
store i64 %x, i64* %x.addr, align 8
%0 = load i64, i64* %x.addr, align 8
@@ -37,7 +37,7 @@ entry:
define i32 @my_clrbit4(i32 %x) nounwind {
entry:
; CHECK-LABEL: my_clrbit4
-; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #13)
+; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#13)
%x.addr = alloca i32, align 4
store i32 %x, i32* %x.addr, align 4
%0 = load i32, i32* %x.addr, align 4
@@ -48,7 +48,7 @@ entry:
define i64 @my_clrbit5(i64 %x) nounwind {
entry:
; CHECK-LABEL: my_clrbit5
-; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #13)
+; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#13)
%x.addr = alloca i64, align 8
store i64 %x, i64* %x.addr, align 8
%0 = load i64, i64* %x.addr, align 8
@@ -59,7 +59,7 @@ entry:
define i64 @my_clrbit6(i64 %x) nounwind {
entry:
; CHECK-LABEL: my_clrbit6
-; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #27)
+; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#27)
%x.addr = alloca i64, align 8
store i64 %x, i64* %x.addr, align 8
%0 = load i64, i64* %x.addr, align 8
@@ -70,7 +70,7 @@ entry:
define zeroext i16 @my_setbit(i16 zeroext %crc) nounwind {
entry:
; CHECK-LABEL: my_setbit
-; CHECK: memh(r{{[0-9]+}}+#{{[0-9]+}}){{ *}}={{ *}}setbit(#15)
+; CHECK: memh(r{{[0-9]+}}+#{{[0-9]+}}) = setbit(#15)
%crc.addr = alloca i16, align 2
store i16 %crc, i16* %crc.addr, align 2
%0 = load i16, i16* %crc.addr, align 2
@@ -85,7 +85,7 @@ entry:
define i32 @my_setbit2(i32 %x) nounwind {
entry:
; CHECK-LABEL: my_setbit2
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}setbit(r{{[0-9]+}}, #15)
+; CHECK: r{{[0-9]+}} = setbit(r{{[0-9]+}},#15)
%x.addr = alloca i32, align 4
store i32 %x, i32* %x.addr, align 4
%0 = load i32, i32* %x.addr, align 4
@@ -96,7 +96,7 @@ entry:
define i64 @my_setbit3(i64 %x) nounwind {
entry:
; CHECK-LABEL: my_setbit3
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}setbit(r{{[0-9]+}}, #15)
+; CHECK: r{{[0-9]+}} = setbit(r{{[0-9]+}},#15)
%x.addr = alloca i64, align 8
store i64 %x, i64* %x.addr, align 8
%0 = load i64, i64* %x.addr, align 8
@@ -107,7 +107,7 @@ entry:
define i32 @my_setbit4(i32 %x) nounwind {
entry:
; CHECK-LABEL: my_setbit4
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}setbit(r{{[0-9]+}}, #31)
+; CHECK: r{{[0-9]+}} = setbit(r{{[0-9]+}},#31)
%x.addr = alloca i32, align 4
store i32 %x, i32* %x.addr, align 4
%0 = load i32, i32* %x.addr, align 4
@@ -118,7 +118,7 @@ entry:
define i64 @my_setbit5(i64 %x) nounwind {
entry:
; CHECK-LABEL: my_setbit5
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}setbit(r{{[0-9]+}}, #13)
+; CHECK: r{{[0-9]+}} = setbit(r{{[0-9]+}},#13)
%x.addr = alloca i64, align 8
store i64 %x, i64* %x.addr, align 8
%0 = load i64, i64* %x.addr, align 8
@@ -129,7 +129,7 @@ entry:
define zeroext i16 @my_togglebit(i16 zeroext %crc) nounwind {
entry:
; CHECK-LABEL: my_togglebit
-; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #15)
+; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}},#15)
%crc.addr = alloca i16, align 2
store i16 %crc, i16* %crc.addr, align 2
%0 = load i16, i16* %crc.addr, align 2
@@ -144,7 +144,7 @@ entry:
define i32 @my_togglebit2(i32 %x) nounwind {
entry:
; CHECK-LABEL: my_togglebit2
-; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #15)
+; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}},#15)
%x.addr = alloca i32, align 4
store i32 %x, i32* %x.addr, align 4
%0 = load i32, i32* %x.addr, align 4
@@ -155,7 +155,7 @@ entry:
define i64 @my_togglebit3(i64 %x) nounwind {
entry:
; CHECK-LABEL: my_togglebit3
-; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #15)
+; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}},#15)
%x.addr = alloca i64, align 8
store i64 %x, i64* %x.addr, align 8
%0 = load i64, i64* %x.addr, align 8
@@ -166,7 +166,7 @@ entry:
define i64 @my_togglebit4(i64 %x) nounwind {
entry:
; CHECK-LABEL: my_togglebit4
-; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #20)
+; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}},#20)
%x.addr = alloca i64, align 8
store i64 %x, i64* %x.addr, align 8
%0 = load i64, i64* %x.addr, align 8
diff --git a/test/CodeGen/Hexagon/cmp.ll b/test/CodeGen/Hexagon/cmp.ll
index c274a787249..a0bb90de1c2 100644
--- a/test/CodeGen/Hexagon/cmp.ll
+++ b/test/CodeGen/Hexagon/cmp.ll
@@ -9,7 +9,7 @@ entry:
%1 = call i32 @llvm.hexagon.C2.cmpeq(i32 %0, i32 1)
ret i32 %1
}
-; CHECK: { p{{[0-3]}} = cmp.eq(r{{[0-9]}}, r{{[0-9]}})
+; CHECK: { p{{[0-3]}} = cmp.eq(r{{[0-9]}},r{{[0-9]}})
; Function Attrs: nounwind readnone
declare i32 @llvm.hexagon.C2.cmpeq(i32, i32) #1
@@ -23,7 +23,7 @@ entry:
%1 = call i32 @llvm.hexagon.C2.cmpgt(i32 %0, i32 2)
ret i32 %1
}
-; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}}, r{{[0-9]}})
+; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}},r{{[0-9]}})
; Function Attrs: nounwind readnone
declare i32 @llvm.hexagon.C2.cmpgt(i32, i32) #1
@@ -37,7 +37,7 @@ entry:
%1 = call i32 @llvm.hexagon.C2.cmpgtu(i32 %0, i32 3)
ret i32 %1
}
-; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}}, r{{[0-9]}})
+; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}},r{{[0-9]}})
; Function Attrs: nounwind readnone
declare i32 @llvm.hexagon.C2.cmpgtu(i32, i32) #1
@@ -51,7 +51,7 @@ entry:
%1 = call i32 @llvm.hexagon.C2.cmplt(i32 %0, i32 4)
ret i32 %1
}
-; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}}, r{{[0-9]}})
+; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}},r{{[0-9]}})
; Function Attrs: nounwind readnone
declare i32 @llvm.hexagon.C2.cmplt(i32, i32) #1
@@ -65,7 +65,7 @@ entry:
%1 = call i32 @llvm.hexagon.C2.cmpltu(i32 %0, i32 5)
ret i32 %1
}
-; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}}, r{{[0-9]}})
+; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}},r{{[0-9]}})
; Function Attrs: nounwind readnone
declare i32 @llvm.hexagon.C2.cmpltu(i32, i32) #1
@@ -79,7 +79,7 @@ entry:
%1 = call i32 @llvm.hexagon.C2.cmpeqi(i32 %0, i32 10)
ret i32 %1
}
-; CHECK: { p{{[0-3]}} = cmp.eq(r{{[0-9]}}, {{.*}}#10)
+; CHECK: { p{{[0-3]}} = cmp.eq(r{{[0-9]}},#10)
; Function Attrs: nounwind readnone
declare i32 @llvm.hexagon.C2.cmpeqi(i32, i32) #1
@@ -93,7 +93,7 @@ entry:
%1 = call i32 @llvm.hexagon.C2.cmpgti(i32 %0, i32 20)
ret i32 %1
}
-; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}}, {{.*}}#20)
+; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}},#20)
; Function Attrs: nounwind readnone
declare i32 @llvm.hexagon.C2.cmpgti(i32, i32) #1
@@ -107,7 +107,7 @@ entry:
%1 = call i32 @llvm.hexagon.C2.cmpgtui(i32 %0, i32 40)
ret i32 %1
}
-; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}}, {{.*}}#40)
+; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}},#40)
; Function Attrs: nounwind readnone
declare i32 @llvm.hexagon.C2.cmpgtui(i32, i32) #1
@@ -121,7 +121,7 @@ entry:
%1 = call i32 @llvm.hexagon.C2.cmpgei(i32 %0, i32 3)
ret i32 %1
}
-; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}}, {{.*}}#2)
+; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}},#2)
; Function Attrs: nounwind readnone
declare i32 @llvm.hexagon.C2.cmpgei(i32, i32) #1
@@ -135,7 +135,7 @@ entry:
%1 = call i32 @llvm.hexagon.C2.cmpgeui(i32 %0, i32 3)
ret i32 %1
}
-; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}}, {{.*}}#2)
+; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}},#2)
; Function Attrs: nounwind readnone
declare i32 @llvm.hexagon.C2.cmpgeui(i32, i32) #1
@@ -149,7 +149,7 @@ entry:
%1 = call i32 @llvm.hexagon.C2.cmpgeui(i32 %0, i32 0)
ret i32 %1
}
-; CHECK: { p{{[0-3]}} = cmp.eq(r{{[0-9]}}, r{{[0-9]}})
+; CHECK: { p{{[0-3]}} = cmp.eq(r{{[0-9]}},r{{[0-9]}})
attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Hexagon/combine.ll b/test/CodeGen/Hexagon/combine.ll
index 04a080fdf42..5b71b366566 100644
--- a/test/CodeGen/Hexagon/combine.ll
+++ b/test/CodeGen/Hexagon/combine.ll
@@ -1,5 +1,5 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 -disable-hsdr -hexagon-bit=0 < %s | FileCheck %s
-; CHECK: combine(r{{[0-9]+}}, r{{[0-9]+}})
+; CHECK: combine(r{{[0-9]+}},r{{[0-9]+}})
@j = external global i32
@k = external global i64
diff --git a/test/CodeGen/Hexagon/constp-combine-neg.ll b/test/CodeGen/Hexagon/constp-combine-neg.ll
index 18f0e81076a..089d9f6a998 100644
--- a/test/CodeGen/Hexagon/constp-combine-neg.ll
+++ b/test/CodeGen/Hexagon/constp-combine-neg.ll
@@ -19,9 +19,9 @@ entry:
; The instructions seem to be in a different order in the .s file than
; the corresponding values in the .ll file, so just run the test three
; times and each time test for a different instruction.
-; CHECK-TEST1: combine(#-2, #3)
-; CHECK-TEST2: combine(#6, #-4)
-; CHECK-TEST3: combine(#-10, #-8)
+; CHECK-TEST1: combine(#-2,#3)
+; CHECK-TEST2: combine(#6,#-4)
+; CHECK-TEST3: combine(#-10,#-8)
attributes #0 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Hexagon/ctlz-cttz-ctpop.ll b/test/CodeGen/Hexagon/ctlz-cttz-ctpop.ll
index b8f483298f8..c8b1b0a2ca0 100644
--- a/test/CodeGen/Hexagon/ctlz-cttz-ctpop.ll
+++ b/test/CodeGen/Hexagon/ctlz-cttz-ctpop.ll
@@ -4,7 +4,7 @@
; CHECK-DAG: cl0({{r[0-9]*:[0-9]*}})
; CHECK-DAG: ct0({{r[0-9]*}})
; CHECK-DAG: cl0({{r[0-9]*}})
-; CHECK-DAG: r{{[0-9]+}} += lsr(r{{[0-9]+}}, #4)
+; CHECK-DAG: r{{[0-9]+}} += lsr(r{{[0-9]+}},#4)
define i32 @foo(i64 %a, i32 %b) nounwind {
entry:
diff --git a/test/CodeGen/Hexagon/dead-store-stack.ll b/test/CodeGen/Hexagon/dead-store-stack.ll
index 93d324baad9..0d8124e76b9 100644
--- a/test/CodeGen/Hexagon/dead-store-stack.ll
+++ b/test/CodeGen/Hexagon/dead-store-stack.ll
@@ -1,6 +1,6 @@
; RUN: llc -O2 -march=hexagon < %s | FileCheck %s
; CHECK: ParseFunc:
-; CHECK: r[[ARG0:[0-9]+]] = memuh(r[[ARG1:[0-9]+]] + #[[OFFSET:[0-9]+]])
+; CHECK: r[[ARG0:[0-9]+]] = memuh(r[[ARG1:[0-9]+]]+#[[OFFSET:[0-9]+]])
; CHECK: memw(r[[ARG1]]+#[[OFFSET]]) = r[[ARG0]]
@.str.3 = external unnamed_addr constant [8 x i8], align 1
diff --git a/test/CodeGen/Hexagon/eh_return.ll b/test/CodeGen/Hexagon/eh_return.ll
index 67649a07afc..1596ade24c8 100644
--- a/test/CodeGen/Hexagon/eh_return.ll
+++ b/test/CodeGen/Hexagon/eh_return.ll
@@ -4,7 +4,7 @@
; CHECK: deallocframe
; CHECK-NEXT: }
; CHECK-NEXT: {
-; CHECK-NEXT: r29 = add(r29, r28)
+; CHECK-NEXT: r29 = add(r29,r28)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: jumpr r31
diff --git a/test/CodeGen/Hexagon/extload-combine.ll b/test/CodeGen/Hexagon/extload-combine.ll
index c492343d791..c7a386a664b 100644
--- a/test/CodeGen/Hexagon/extload-combine.ll
+++ b/test/CodeGen/Hexagon/extload-combine.ll
@@ -15,8 +15,8 @@
; Function Attrs: nounwind
define i64 @short_test1() #0 {
-; CHECK: [[VAR:r[0-9]+]]{{ *}}={{ *}}memuh(##
-; CHECK: combine(#0, [[VAR]])
+; CHECK: [[VAR:r[0-9]+]] = memuh(##
+; CHECK: combine(#0,[[VAR]])
entry:
store i16 0, i16* @a, align 2
%0 = load i16, i16* @b, align 2
@@ -26,7 +26,7 @@ entry:
; Function Attrs: nounwind
define i64 @short_test2() #0 {
-; CHECK: [[VAR1:r[0-9]+]]{{ *}}={{ *}}memh(##
+; CHECK: [[VAR1:r[0-9]+]] = memh(##
; CHECK: sxtw([[VAR1]])
entry:
store i16 0, i16* @a, align 2
@@ -37,8 +37,8 @@ entry:
; Function Attrs: nounwind
define i64 @char_test1() #0 {
-; CHECK: [[VAR2:r[0-9]+]]{{ *}}={{ *}}memub(##
-; CHECK: combine(#0, [[VAR2]])
+; CHECK: [[VAR2:r[0-9]+]] = memub(##
+; CHECK: combine(#0,[[VAR2]])
entry:
store i8 0, i8* @char_a, align 1
%0 = load i8, i8* @char_b, align 1
@@ -48,7 +48,7 @@ entry:
; Function Attrs: nounwind
define i64 @char_test2() #0 {
-; CHECK: [[VAR3:r[0-9]+]]{{ *}}={{ *}}memb(##
+; CHECK: [[VAR3:r[0-9]+]] = memb(##
; CHECK: sxtw([[VAR3]])
entry:
store i8 0, i8* @char_a, align 1
@@ -59,8 +59,8 @@ entry:
; Function Attrs: nounwind
define i64 @int_test1() #0 {
-; CHECK: [[VAR4:r[0-9]+]]{{ *}}={{ *}}memw(##
-; CHECK: combine(#0, [[VAR4]])
+; CHECK: [[VAR4:r[0-9]+]] = memw(##
+; CHECK: combine(#0,[[VAR4]])
entry:
store i32 0, i32* @int_a, align 4
%0 = load i32, i32* @int_b, align 4
@@ -70,7 +70,7 @@ entry:
; Function Attrs: nounwind
define i64 @int_test2() #0 {
-; CHECK: [[VAR5:r[0-9]+]]{{ *}}={{ *}}memw(##
+; CHECK: [[VAR5:r[0-9]+]] = memw(##
; CHECK: sxtw([[VAR5]])
entry:
store i32 0, i32* @int_a, align 4
diff --git a/test/CodeGen/Hexagon/extract-basic.ll b/test/CodeGen/Hexagon/extract-basic.ll
index c75125cedd3..ad118dea0ab 100644
--- a/test/CodeGen/Hexagon/extract-basic.ll
+++ b/test/CodeGen/Hexagon/extract-basic.ll
@@ -1,8 +1,8 @@
; RUN: llc -O2 -march=hexagon < %s | FileCheck %s
-; CHECK-DAG: extractu(r{{[0-9]*}}, #3, #4)
-; CHECK-DAG: extractu(r{{[0-9]*}}, #8, #7)
-; CHECK-DAG: extractu(r{{[0-9]*}}, #8, #16)
+; CHECK-DAG: extractu(r{{[0-9]*}},#3,#4)
+; CHECK-DAG: extractu(r{{[0-9]*}},#8,#7)
+; CHECK-DAG: extractu(r{{[0-9]*}},#8,#16)
; C source:
; typedef struct {
diff --git a/test/CodeGen/Hexagon/fadd.ll b/test/CodeGen/Hexagon/fadd.ll
index 6cf0fbbccf7..0418c1724f5 100644
--- a/test/CodeGen/Hexagon/fadd.ll
+++ b/test/CodeGen/Hexagon/fadd.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that we generate sp floating point add in V5.
-; CHECK: r{{[0-9]+}} = sfadd(r{{[0-9]+}}, r{{[0-9]+}})
+; CHECK: r{{[0-9]+}} = sfadd(r{{[0-9]+}},r{{[0-9]+}})
define i32 @main() nounwind {
entry:
diff --git a/test/CodeGen/Hexagon/float-amode.ll b/test/CodeGen/Hexagon/float-amode.ll
index 5f13e048f9c..d770582ecab 100644
--- a/test/CodeGen/Hexagon/float-amode.ll
+++ b/test/CodeGen/Hexagon/float-amode.ll
@@ -12,9 +12,9 @@
@a = common global float 0.000000e+00, align 4
; CHECK-LABEL: test1
-; CHECK: [[REG11:(r[0-9]+)]]{{ *}}={{ *}}memw(r{{[0-9]+}} + r{{[0-9]+}}<<#2)
+; CHECK: [[REG11:(r[0-9]+)]] = memw(r{{[0-9]+}}+r{{[0-9]+}}<<#2)
; CHECK: [[REG12:(r[0-9]+)]] += sfmpy({{.*}}[[REG11]]
-; CHECK: memw(r{{[0-9]+}} + r{{[0-9]+}}<<#2) = [[REG12]].new
+; CHECK: memw(r{{[0-9]+}}+r{{[0-9]+}}<<#2) = [[REG12]].new
; Function Attrs: norecurse nounwind
define void @test1(%struct.matrix_params* nocapture readonly %params, i32 %col1) {
@@ -35,7 +35,7 @@ entry:
}
; CHECK-LABEL: test2
-; CHECK: [[REG21:(r[0-9]+)]]{{ *}}={{ *}}memw(##globB+92)
+; CHECK: [[REG21:(r[0-9]+)]] = memw(##globB+92)
; CHECK: [[REG22:(r[0-9]+)]] = sfadd({{.*}}[[REG21]]
; CHECK: memw(##globA+84) = [[REG22]]
@@ -54,7 +54,7 @@ entry:
}
; CHECK-LABEL: test3
-; CHECK: [[REG31:(r[0-9]+)]]{{ *}}={{ *}}memw(gp+#b)
+; CHECK: [[REG31:(r[0-9]+)]] = memw(gp+#b)
; CHECK: [[REG32:(r[0-9]+)]] = sfadd({{.*}}[[REG31]]
; CHECK: memw(gp+#a) = [[REG32]]
@@ -73,9 +73,9 @@ entry:
}
; CHECK-LABEL: test4
-; CHECK: [[REG41:(r[0-9]+)]]{{ *}}={{ *}}memw(r0<<#2 + ##globB+52)
+; CHECK: [[REG41:(r[0-9]+)]] = memw(r0<<#2+##globB+52)
; CHECK: [[REG42:(r[0-9]+)]] = sfadd({{.*}}[[REG41]]
-; CHECK: memw(r0<<#2 + ##globA+60) = [[REG42]]
+; CHECK: memw(r0<<#2+##globA+60) = [[REG42]]
; Function Attrs: noinline norecurse nounwind
define void @test4(i32 %col1) {
entry:
diff --git a/test/CodeGen/Hexagon/fmul.ll b/test/CodeGen/Hexagon/fmul.ll
index 4f55d0bec47..552f98ec7a5 100644
--- a/test/CodeGen/Hexagon/fmul.ll
+++ b/test/CodeGen/Hexagon/fmul.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that we generate single precision floating point multiply in V5.
-; CHECK: r{{[0-9]+}} = sfmpy(r{{[0-9]+}}, r{{[0-9]+}})
+; CHECK: r{{[0-9]+}} = sfmpy(r{{[0-9]+}},r{{[0-9]+}})
define i32 @main() nounwind {
diff --git a/test/CodeGen/Hexagon/fsel.ll b/test/CodeGen/Hexagon/fsel.ll
index 247249da50b..a2f0b4a47f1 100644
--- a/test/CodeGen/Hexagon/fsel.ll
+++ b/test/CodeGen/Hexagon/fsel.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
; CHECK-LABEL: danny:
-; CHECK: mux(p0, r1, ##1065353216)
+; CHECK: mux(p0,r1,##1065353216)
define float @danny(i32 %x, float %f) #0 {
%t = icmp sgt i32 %x, 0
@@ -10,7 +10,7 @@ define float @danny(i32 %x, float %f) #0 {
}
; CHECK-LABEL: sammy:
-; CHECK: mux(p0, ##1069547520, r1)
+; CHECK: mux(p0,##1069547520,r1)
define float @sammy(i32 %x, float %f) #0 {
%t = icmp sgt i32 %x, 0
diff --git a/test/CodeGen/Hexagon/fsub.ll b/test/CodeGen/Hexagon/fsub.ll
index ca7bdc4d0b3..d7b0e2f65b3 100644
--- a/test/CodeGen/Hexagon/fsub.ll
+++ b/test/CodeGen/Hexagon/fsub.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that we generate sp floating point subtract in V5.
-; CHECK: r{{[0-9]+}} = sfsub(r{{[0-9]+}}, r{{[0-9]+}})
+; CHECK: r{{[0-9]+}} = sfsub(r{{[0-9]+}},r{{[0-9]+}})
define i32 @main() nounwind {
entry:
diff --git a/test/CodeGen/Hexagon/fusedandshift.ll b/test/CodeGen/Hexagon/fusedandshift.ll
index 414574aec40..0310d440ffe 100644
--- a/test/CodeGen/Hexagon/fusedandshift.ll
+++ b/test/CodeGen/Hexagon/fusedandshift.ll
@@ -2,7 +2,7 @@
; Check that we generate fused logical and with shift instruction.
; Disable "extract" generation, since it may eliminate the and/lsr.
-; CHECK: r{{[0-9]+}} = and(#15, lsr(r{{[0-9]+}}, #{{[0-9]+}})
+; CHECK: r{{[0-9]+}} = and(#15,lsr(r{{[0-9]+}},#{{[0-9]+}})
define i32 @main(i16* %a, i16* %b) nounwind {
entry:
diff --git a/test/CodeGen/Hexagon/hwloop-cleanup.ll b/test/CodeGen/Hexagon/hwloop-cleanup.ll
index c04966a5a4b..56a6fedf81e 100644
--- a/test/CodeGen/Hexagon/hwloop-cleanup.ll
+++ b/test/CodeGen/Hexagon/hwloop-cleanup.ll
@@ -5,7 +5,7 @@
; Bug 6685.
; CHECK: loop0
-; CHECK-NOT: r{{[0-9]+}}{{.}}={{.}}add(r{{[0-9]+}},{{.}}#-1)
+; CHECK-NOT: r{{[0-9]+}} = add(r{{[0-9]+}},#-1)
; CHECK-NOT: cmp.eq
; CHECK: endloop0
@@ -39,7 +39,7 @@ for.end:
; This test checks that that initial loop count value is removed.
; CHECK-NOT: ={{.}}#40
; CHECK: loop0
-; CHECK-NOT: r{{[0-9]+}}{{.}}={{.}}add(r{{[0-9]+}},{{.}}#-1)
+; CHECK-NOT: r{{[0-9]+}} = add(r{{[0-9]+}},#-1)
; CHECK-NOT: cmp.eq
; CHECK: endloop0
@@ -64,7 +64,7 @@ for.end:
; This test checks that we don't remove the induction variable since it's used.
; CHECK: loop0
-; CHECK: r{{[0-9]+}}{{.}}={{.}}add(r{{[0-9]+}},{{.}}#1)
+; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}},#1)
; CHECK-NOT: cmp.eq
; CHECK: endloop0
define i32 @test3(i32* nocapture %b) nounwind {
diff --git a/test/CodeGen/Hexagon/hwloop-loop1.ll b/test/CodeGen/Hexagon/hwloop-loop1.ll
index 238d34e7ea1..7508b947f1c 100644
--- a/test/CodeGen/Hexagon/hwloop-loop1.ll
+++ b/test/CodeGen/Hexagon/hwloop-loop1.ll
@@ -2,8 +2,8 @@
;
; Generate loop1 instruction for double loop sequence.
-; CHECK: loop1(.LBB{{.}}_{{.}}, #100)
-; CHECK: loop0(.LBB{{.}}_{{.}}, #100)
+; CHECK: loop1(.LBB{{.}}_{{.}},#100)
+; CHECK: loop0(.LBB{{.}}_{{.}},#100)
; CHECK: endloop0
; CHECK: endloop1
diff --git a/test/CodeGen/Hexagon/hwloop1.ll b/test/CodeGen/Hexagon/hwloop1.ll
index 68af3b34eee..7a805d951b9 100644
--- a/test/CodeGen/Hexagon/hwloop1.ll
+++ b/test/CodeGen/Hexagon/hwloop1.ll
@@ -3,7 +3,7 @@
; Case 1 : Loop with a constant number of iterations.
; CHECK-LABEL: @hwloop1
-; CHECK: loop0(.LBB{{.}}_{{.}}, #10)
+; CHECK: loop0(.LBB{{.}}_{{.}},#10)
; CHECK: endloop0
@a = common global [10 x i32] zeroinitializer, align 4
@@ -23,7 +23,7 @@ for.end:
; Case 2 : Loop with a run-time number of iterations.
; CHECK-LABEL: @hwloop2
-; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}})
+; CHECK: loop0(.LBB{{.}}_{{.}},r{{[0-9]+}})
; CHECK: endloop0
define i32 @hwloop2(i32 %n, i32* nocapture %b) nounwind {
@@ -54,8 +54,8 @@ for.end:
; Case 3 : Induction variable increment more than 1.
; CHECK-LABEL: @hwloop3
-; CHECK: lsr(r{{[0-9]+}}, #2)
-; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}})
+; CHECK: lsr(r{{[0-9]+}},#2)
+; CHECK: loop0(.LBB{{.}}_{{.}},r{{[0-9]+}})
; CHECK: endloop0
define i32 @hwloop3(i32 %n, i32* nocapture %b) nounwind {
@@ -86,7 +86,7 @@ for.end:
; Case 4 : Loop exit compare uses register instead of immediate value.
; CHECK-LABEL: @hwloop4
-; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}})
+; CHECK: loop0(.LBB{{.}}_{{.}},r{{[0-9]+}})
; CHECK: endloop0
define i32 @hwloop4(i32 %n, i32* nocapture %b) nounwind {
@@ -114,7 +114,7 @@ for.end:
; Case 5: After LSR, the initial value is 100 and the iv decrements to 0.
; CHECK-LABEL: @hwloop5
-; CHECK: loop0(.LBB{{.}}_{{.}}, #100)
+; CHECK: loop0(.LBB{{.}}_{{.}},#100)
; CHECK: endloop0
define void @hwloop5(i32* nocapture %a, i32* nocapture %res) nounwind {
@@ -138,8 +138,8 @@ for.end:
; Case 6: Large immediate offset
; CHECK-LABEL: @hwloop6
-; CHECK-NOT: loop0(.LBB{{.}}_{{.}}, #1024)
-; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}})
+; CHECK-NOT: loop0(.LBB{{.}}_{{.}},#1024)
+; CHECK: loop0(.LBB{{.}}_{{.}},r{{[0-9]+}})
; CHECK: endloop0
define void @hwloop6(i32* nocapture %a, i32* nocapture %res) nounwind {
diff --git a/test/CodeGen/Hexagon/hwloop2.ll b/test/CodeGen/Hexagon/hwloop2.ll
index d411d979904..ba3de1f1a2a 100644
--- a/test/CodeGen/Hexagon/hwloop2.ll
+++ b/test/CodeGen/Hexagon/hwloop2.ll
@@ -2,7 +2,7 @@
; Test for multiple phis with induction variables.
-; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}})
+; CHECK: loop0(.LBB{{.}}_{{.}},r{{[0-9]+}})
; CHECK: endloop0
define i32 @hwloop4(i32* nocapture %s, i32* nocapture %a, i32 %n) {
diff --git a/test/CodeGen/Hexagon/hwloop4.ll b/test/CodeGen/Hexagon/hwloop4.ll
index d159c45e3fb..b8cea4c7772 100644
--- a/test/CodeGen/Hexagon/hwloop4.ll
+++ b/test/CodeGen/Hexagon/hwloop4.ll
@@ -2,9 +2,9 @@
;
; Remove the unnecessary 'add' instruction used for the hardware loop setup.
-; CHECK: [[OP0:r[0-9]+]] = add([[OP1:r[0-9]+]], #-[[OP2:[0-9]+]]
-; CHECK-NOT: add([[OP0]], #[[OP2]])
-; CHECK: lsr([[OP1]], #{{[0-9]+}})
+; CHECK: [[OP0:r[0-9]+]] = add([[OP1:r[0-9]+]],#-[[OP2:[0-9]+]]
+; CHECK-NOT: add([[OP0]],#[[OP2]])
+; CHECK: lsr([[OP1]],#{{[0-9]+}})
; CHECK: loop0
define void @matrix_mul_matrix(i32 %N, i32* nocapture %C, i16* nocapture readnone %A, i16* nocapture readnone %B) #0 {
diff --git a/test/CodeGen/Hexagon/hwloop5.ll b/test/CodeGen/Hexagon/hwloop5.ll
index 0886b03cc75..f4990dabebb 100644
--- a/test/CodeGen/Hexagon/hwloop5.ll
+++ b/test/CodeGen/Hexagon/hwloop5.ll
@@ -2,9 +2,9 @@
;
; Generate a hardware loop when a loop with an unknown trip count is vectorized.
-; CHECK: loop0(.LBB{{[0-9]*}}_{{[0-9]*}}, r{{[0-9]+}})
+; CHECK: loop0(.LBB{{[0-9]*}}_{{[0-9]*}},r{{[0-9]+}})
; CHECK: endloop0
-; CHECK: loop0(.LBB{{[0-9]*}}_{{[0-9]*}}, r{{[0-9]+}})
+; CHECK: loop0(.LBB{{[0-9]*}}_{{[0-9]*}},r{{[0-9]+}})
; CHECK: endloop0
@A = common global [1000 x i32] zeroinitializer, align 8
diff --git a/test/CodeGen/Hexagon/ifcvt-diamond-bug-2016-08-26.ll b/test/CodeGen/Hexagon/ifcvt-diamond-bug-2016-08-26.ll
index 68a5dc16ecf..cbc1c327e69 100644
--- a/test/CodeGen/Hexagon/ifcvt-diamond-bug-2016-08-26.ll
+++ b/test/CodeGen/Hexagon/ifcvt-diamond-bug-2016-08-26.ll
@@ -15,7 +15,7 @@ entry:
br i1 %cmp199, label %if.then200, label %if.else201
; CHECK-DAG: [[R4:r[0-9]+]] = #4
-; CHECK: p0 = cmp.eq(r0, #0)
+; CHECK: p0 = cmp.eq(r0,#0)
; CHECK: if (!p0.new) [[R3:r[0-9]+]] = #3
; CHECK-DAG: if (!p0) memh(##t) = [[R3]]
; CHECK-DAG: if (p0) memh(##t) = [[R4]]
diff --git a/test/CodeGen/Hexagon/insert-basic.ll b/test/CodeGen/Hexagon/insert-basic.ll
index e941c063d9e..14ee735abd7 100644
--- a/test/CodeGen/Hexagon/insert-basic.ll
+++ b/test/CodeGen/Hexagon/insert-basic.ll
@@ -1,8 +1,8 @@
; RUN: llc -O2 -march=hexagon < %s | FileCheck %s
-; CHECK-DAG: insert(r{{[0-9]*}}, #17, #0)
-; CHECK-DAG: insert(r{{[0-9]*}}, #18, #0)
-; CHECK-DAG: insert(r{{[0-9]*}}, #22, #0)
-; CHECK-DAG: insert(r{{[0-9]*}}, #12, #0)
+; CHECK-DAG: insert(r{{[0-9]*}},#17,#0)
+; CHECK-DAG: insert(r{{[0-9]*}},#18,#0)
+; CHECK-DAG: insert(r{{[0-9]*}},#22,#0)
+; CHECK-DAG: insert(r{{[0-9]*}},#12,#0)
; C source:
; typedef struct {
diff --git a/test/CodeGen/Hexagon/insert4.ll b/test/CodeGen/Hexagon/insert4.ll
index c4d575dd406..3bc8e9e5798 100644
--- a/test/CodeGen/Hexagon/insert4.ll
+++ b/test/CodeGen/Hexagon/insert4.ll
@@ -1,8 +1,8 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
;
; Check that we no longer generate 4 inserts.
-; CHECK: combine(r{{[0-9]+}}.l, r{{[0-9]+}}.l)
-; CHECK: combine(r{{[0-9]+}}.l, r{{[0-9]+}}.l)
+; CHECK: combine(r{{[0-9]+}}.l,r{{[0-9]+}}.l)
+; CHECK: combine(r{{[0-9]+}}.l,r{{[0-9]+}}.l)
; CHECK-NOT: insert
target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
diff --git a/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll b/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll
index fcf80b08181..abdd4cba7c5 100644
--- a/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll
+++ b/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll
@@ -10,21 +10,21 @@ define i32 @A2_addi(i32 %a) {
%z = call i32 @llvm.hexagon.A2.addi(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = add({{.*}}, #0)
+; CHECK: = add({{.*}},#0)
declare i32 @llvm.hexagon.A2.add(i32, i32)
define i32 @A2_add(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.add(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}, {{.*}})
+; CHECK: = add({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.addsat(i32, i32)
define i32 @A2_addsat(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addsat(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}, {{.*}}):sat
+; CHECK: = add({{.*}},{{.*}}):sat
; Logical operations
declare i32 @llvm.hexagon.A2.and(i32, i32)
@@ -32,35 +32,35 @@ define i32 @A2_and(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.and(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = and({{.*}}, {{.*}})
+; CHECK: = and({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.or(i32, i32)
define i32 @A2_or(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.or(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = or({{.*}}, {{.*}})
+; CHECK: = or({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.xor(i32, i32)
define i32 @A2_xor(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.xor(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = xor({{.*}}, {{.*}})
+; CHECK: = xor({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.andn(i32, i32)
define i32 @A4_andn(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.andn(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = and({{.*}}, ~{{.*}})
+; CHECK: = and({{.*}},~{{.*}})
declare i32 @llvm.hexagon.A4.orn(i32, i32)
define i32 @A4_orn(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.orn(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = or({{.*}}, ~{{.*}})
+; CHECK: = or({{.*}},~{{.*}})
; Subtract
declare i32 @llvm.hexagon.A2.sub(i32, i32)
@@ -68,14 +68,14 @@ define i32 @A2_sub(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.sub(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}, {{.*}})
+; CHECK: = sub({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.subsat(i32, i32)
define i32 @A2_subsat(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subsat(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}, {{.*}}):sat
+; CHECK: = sub({{.*}},{{.*}}):sat
; Sign extend
declare i32 @llvm.hexagon.A2.sxtb(i32)
@@ -128,21 +128,21 @@ define i32 @A2_svaddh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svaddh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = vaddh({{.*}}, {{.*}})
+; CHECK: = vaddh({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.svaddhs(i32, i32)
define i32 @A2_svaddhs(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svaddhs(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = vaddh({{.*}}, {{.*}}):sat
+; CHECK: = vaddh({{.*}},{{.*}}):sat
declare i32 @llvm.hexagon.A2.svadduhs(i32, i32)
define i32 @A2_svadduhs(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svadduhs(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = vadduh({{.*}}, {{.*}}):sat
+; CHECK: = vadduh({{.*}},{{.*}}):sat
; Vector average halfwords
declare i32 @llvm.hexagon.A2.svavgh(i32, i32)
@@ -150,21 +150,21 @@ define i32 @A2_svavgh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svavgh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = vavgh({{.*}}, {{.*}})
+; CHECK: = vavgh({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.svavghs(i32, i32)
define i32 @A2_svavghs(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svavghs(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = vavgh({{.*}}, {{.*}}):rnd
+; CHECK: = vavgh({{.*}},{{.*}}):rnd
declare i32 @llvm.hexagon.A2.svnavgh(i32, i32)
define i32 @A2_svnavgh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svnavgh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = vnavgh({{.*}}, {{.*}})
+; CHECK: = vnavgh({{.*}},{{.*}})
; Vector subtract halfwords
declare i32 @llvm.hexagon.A2.svsubh(i32, i32)
@@ -172,21 +172,21 @@ define i32 @A2_svsubh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svsubh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = vsubh({{.*}}, {{.*}})
+; CHECK: = vsubh({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.svsubhs(i32, i32)
define i32 @A2_svsubhs(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svsubhs(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = vsubh({{.*}}, {{.*}}):sat
+; CHECK: = vsubh({{.*}},{{.*}}):sat
declare i32 @llvm.hexagon.A2.svsubuhs(i32, i32)
define i32 @A2_svsubuhs(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svsubuhs(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = vsubuh({{.*}}, {{.*}}):sat
+; CHECK: = vsubuh({{.*}},{{.*}}):sat
; Zero extend
declare i32 @llvm.hexagon.A2.zxth(i32)
diff --git a/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll b/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll
index c9fb0afe078..554dac4563d 100644
--- a/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll
+++ b/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll
@@ -10,56 +10,56 @@ define i64 @A4_combineri(i32 %a) {
%z = call i64 @llvm.hexagon.A4.combineri(i32 %a, i32 0)
ret i64 %z
}
-; CHECK: = combine({{.*}}, #0)
+; CHECK: = combine({{.*}},#0)
declare i64 @llvm.hexagon.A4.combineir(i32, i32)
define i64 @A4_combineir(i32 %a) {
%z = call i64 @llvm.hexagon.A4.combineir(i32 0, i32 %a)
ret i64 %z
}
-; CHECK: = combine(#0, {{.*}})
+; CHECK: = combine(#0,{{.*}})
declare i64 @llvm.hexagon.A2.combineii(i32, i32)
define i64 @A2_combineii() {
%z = call i64 @llvm.hexagon.A2.combineii(i32 0, i32 0)
ret i64 %z
}
-; CHECK: = combine(#0, #0)
+; CHECK: = combine(#0,#0)
declare i32 @llvm.hexagon.A2.combine.hh(i32, i32)
define i32 @A2_combine_hh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.combine.hh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = combine({{.*}}, {{.*}})
+; CHECK: = combine({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.combine.hl(i32, i32)
define i32 @A2_combine_hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.combine.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = combine({{.*}}, {{.*}})
+; CHECK: = combine({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.combine.lh(i32, i32)
define i32 @A2_combine_lh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.combine.lh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = combine({{.*}}, {{.*}})
+; CHECK: = combine({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.combine.ll(i32, i32)
define i32 @A2_combine_ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.combine.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = combine({{.*}}, {{.*}})
+; CHECK: = combine({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.combinew(i32, i32)
define i64 @A2_combinew(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.A2.combinew(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = combine({{.*}}, {{.*}})
+; CHECK: = combine({{.*}},{{.*}})
; Mux
declare i32 @llvm.hexagon.C2.muxri(i32, i32, i32)
@@ -67,21 +67,21 @@ define i32 @C2_muxri(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.muxri(i32 %a, i32 0, i32 %b)
ret i32 %z
}
-; CHECK: = mux({{.*}}, #0, {{.*}})
+; CHECK: = mux({{.*}},#0,{{.*}})
declare i32 @llvm.hexagon.C2.muxir(i32, i32, i32)
define i32 @C2_muxir(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.muxir(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: = mux({{.*}}, {{.*}}, #0)
+; CHECK: = mux({{.*}},{{.*}},#0)
declare i32 @llvm.hexagon.C2.mux(i32, i32, i32)
define i32 @C2_mux(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.C2.mux(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: = mux({{.*}}, {{.*}}, {{.*}})
+; CHECK: = mux({{.*}},{{.*}},{{.*}})
; Shift word by 16
declare i32 @llvm.hexagon.A2.aslh(i32)
@@ -104,4 +104,4 @@ define i64 @S2_packhl(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.packhl(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = packhl({{.*}}, {{.*}})
+; CHECK: = packhl({{.*}},{{.*}})
diff --git a/test/CodeGen/Hexagon/intrinsics/cr.ll b/test/CodeGen/Hexagon/intrinsics/cr.ll
index f308ef8e566..4c0fcb3707c 100644
--- a/test/CodeGen/Hexagon/intrinsics/cr.ll
+++ b/test/CodeGen/Hexagon/intrinsics/cr.ll
@@ -10,14 +10,14 @@ define i32 @C4_fastcorner9(i32 %a, i32 %b) {
%z = call i32@llvm.hexagon.C4.fastcorner9(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = fastcorner9({{.*}}, {{.*}})
+; CHECK: = fastcorner9({{.*}},{{.*}})
declare i32 @llvm.hexagon.C4.fastcorner9.not(i32, i32)
define i32 @C4_fastcorner9_not(i32 %a, i32 %b) {
%z = call i32@llvm.hexagon.C4.fastcorner9.not(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = !fastcorner9({{.*}}, {{.*}})
+; CHECK: = !fastcorner9({{.*}},{{.*}})
; Logical reductions on predicates
declare i32 @llvm.hexagon.C2.any8(i32)
@@ -41,70 +41,70 @@ define i32 @C2_and(i32 %a, i32 %b) {
%z = call i32@llvm.hexagon.C2.and(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = and({{.*}}, {{.*}})
+; CHECK: = and({{.*}},{{.*}})
declare i32 @llvm.hexagon.C4.and.and(i32, i32, i32)
define i32 @C4_and_and(i32 %a, i32 %b, i32 %c) {
%z = call i32@llvm.hexagon.C4.and.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: = and({{.*}}, and({{.*}}, {{.*}}))
+; CHECK: = and({{.*}},and({{.*}},{{.*}}))
declare i32 @llvm.hexagon.C2.or(i32, i32)
define i32 @C2_or(i32 %a, i32 %b) {
%z = call i32@llvm.hexagon.C2.or(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = or({{.*}}, {{.*}})
+; CHECK: = or({{.*}},{{.*}})
declare i32 @llvm.hexagon.C4.and.or(i32, i32, i32)
define i32 @C4_and_or(i32 %a, i32 %b, i32 %c) {
%z = call i32@llvm.hexagon.C4.and.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: = and({{.*}}, or({{.*}}, {{.*}}))
+; CHECK: = and({{.*}},or({{.*}},{{.*}}))
declare i32 @llvm.hexagon.C2.xor(i32, i32)
define i32 @C2_xor(i32 %a, i32 %b) {
%z = call i32@llvm.hexagon.C2.xor(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = xor({{.*}}, {{.*}})
+; CHECK: = xor({{.*}},{{.*}})
declare i32 @llvm.hexagon.C4.or.and(i32, i32, i32)
define i32 @C4_or_and(i32 %a, i32 %b, i32 %c) {
%z = call i32@llvm.hexagon.C4.or.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: = or({{.*}}, and({{.*}}, {{.*}}))
+; CHECK: = or({{.*}},and({{.*}},{{.*}}))
declare i32 @llvm.hexagon.C2.andn(i32, i32)
define i32 @C2_andn(i32 %a, i32 %b) {
%z = call i32@llvm.hexagon.C2.andn(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = and({{.*}}, !{{.*}})
+; CHECK: = and({{.*}},!{{.*}})
declare i32 @llvm.hexagon.C4.or.or(i32, i32, i32)
define i32 @C4_or_or(i32 %a, i32 %b, i32 %c) {
%z = call i32@llvm.hexagon.C4.or.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: = or({{.*}}, or({{.*}}, {{.*}}))
+; CHECK: = or({{.*}},or({{.*}},{{.*}}))
declare i32 @llvm.hexagon.C4.and.andn(i32, i32, i32)
define i32 @C4_and_andn(i32 %a, i32 %b, i32 %c) {
%z = call i32@llvm.hexagon.C4.and.andn(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: = and({{.*}}, and({{.*}}, !{{.*}}))
+; CHECK: = and({{.*}},and({{.*}},!{{.*}}))
declare i32 @llvm.hexagon.C4.and.orn(i32, i32, i32)
define i32 @C4_and_orn(i32 %a, i32 %b, i32 %c) {
%z = call i32@llvm.hexagon.C4.and.orn(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: = and({{.*}}, or({{.*}}, !{{.*}}))
+; CHECK: = and({{.*}},or({{.*}},!{{.*}}))
declare i32 @llvm.hexagon.C2.not(i32)
define i32 @C2_not(i32 %a) {
@@ -118,18 +118,18 @@ define i32 @C4_or_andn(i32 %a, i32 %b, i32 %c) {
%z = call i32@llvm.hexagon.C4.or.andn(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: = or({{.*}}, and({{.*}}, !{{.*}}))
+; CHECK: = or({{.*}},and({{.*}},!{{.*}}))
declare i32 @llvm.hexagon.C2.orn(i32, i32)
define i32 @C2_orn(i32 %a, i32 %b) {
%z = call i32@llvm.hexagon.C2.orn(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = or({{.*}}, !{{.*}})
+; CHECK: = or({{.*}},!{{.*}})
declare i32 @llvm.hexagon.C4.or.orn(i32, i32, i32)
define i32 @C4_or_orn(i32 %a, i32 %b, i32 %c) {
%z = call i32@llvm.hexagon.C4.or.orn(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: = or({{.*}}, or({{.*}}, !{{.*}}))
+; CHECK: = or({{.*}},or({{.*}},!{{.*}}))
diff --git a/test/CodeGen/Hexagon/intrinsics/system_user.ll b/test/CodeGen/Hexagon/intrinsics/system_user.ll
index dad4effb0a1..ac4c53e221d 100644
--- a/test/CodeGen/Hexagon/intrinsics/system_user.ll
+++ b/test/CodeGen/Hexagon/intrinsics/system_user.ll
@@ -10,4 +10,4 @@ define void @prefetch(i8* %a) {
call void @llvm.hexagon.prefetch(i8* %a)
ret void
}
-; CHECK: dcfetch({{.*}} + #0)
+; CHECK: dcfetch({{.*}}+#0)
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll b/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll
index c5c23c22bde..4d630c62005 100644
--- a/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll
@@ -34,42 +34,42 @@ define i32 @S4_addaddi(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S4.addaddi(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: = add({{.*}}, add({{.*}}, #0))
+; CHECK: = add({{.*}},add({{.*}},#0))
declare i32 @llvm.hexagon.S4.subaddi(i32, i32, i32)
define i32 @S4_subaddi(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S4.subaddi(i32 %a, i32 0, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}, sub(#0, {{.*}}))
+; CHECK: = add({{.*}},sub(#0,{{.*}}))
declare i32 @llvm.hexagon.M2.accii(i32, i32, i32)
define i32 @M2_accii(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.accii(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: += add({{.*}}, #0)
+; CHECK: += add({{.*}},#0)
declare i32 @llvm.hexagon.M2.naccii(i32, i32, i32)
define i32 @M2_naccii(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.naccii(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: -= add({{.*}}, #0)
+; CHECK: -= add({{.*}},#0)
declare i32 @llvm.hexagon.M2.acci(i32, i32, i32)
define i32 @M2_acci(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.acci(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += add({{.*}}, {{.*}})
+; CHECK: += add({{.*}},{{.*}})
declare i32 @llvm.hexagon.M2.nacci(i32, i32, i32)
define i32 @M2_nacci(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.nacci(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= add({{.*}}, {{.*}})
+; CHECK: -= add({{.*}},{{.*}})
; Add doublewords
declare i64 @llvm.hexagon.A2.addp(i64, i64)
@@ -77,14 +77,14 @@ define i64 @A2_addp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.addp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = add({{.*}}, {{.*}})
+; CHECK: = add({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.addpsat(i64, i64)
define i64 @A2_addpsat(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.addpsat(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = add({{.*}}, {{.*}}):sat
+; CHECK: = add({{.*}},{{.*}}):sat
; Add halfword
declare i32 @llvm.hexagon.A2.addh.l16.ll(i32, i32)
@@ -92,84 +92,84 @@ define i32 @A2_addh_l16_ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.l16.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}.l, {{.*}}.l)
+; CHECK: = add({{.*}}.l,{{.*}}.l)
declare i32 @llvm.hexagon.A2.addh.l16.hl(i32, i32)
define i32 @A2_addh_l16_hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.l16.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}.l, {{.*}}.h)
+; CHECK: = add({{.*}}.l,{{.*}}.h)
declare i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32, i32)
define i32 @A2_addh_l16_sat.ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}.l, {{.*}}.l):sat
+; CHECK: = add({{.*}}.l,{{.*}}.l):sat
declare i32 @llvm.hexagon.A2.addh.l16.sat.hl(i32, i32)
define i32 @A2_addh_l16_sat.hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.l16.sat.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}.l, {{.*}}.h):sat
+; CHECK: = add({{.*}}.l,{{.*}}.h):sat
declare i32 @llvm.hexagon.A2.addh.h16.ll(i32, i32)
define i32 @A2_addh_h16_ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.h16.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}.l, {{.*}}.l):<<16
+; CHECK: = add({{.*}}.l,{{.*}}.l):<<16
declare i32 @llvm.hexagon.A2.addh.h16.lh(i32, i32)
define i32 @A2_addh_h16_lh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.h16.lh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}.l, {{.*}}.h):<<16
+; CHECK: = add({{.*}}.l,{{.*}}.h):<<16
declare i32 @llvm.hexagon.A2.addh.h16.hl(i32, i32)
define i32 @A2_addh_h16_hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.h16.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}.h, {{.*}}.l):<<16
+; CHECK: = add({{.*}}.h,{{.*}}.l):<<16
declare i32 @llvm.hexagon.A2.addh.h16.hh(i32, i32)
define i32 @A2_addh_h16_hh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.h16.hh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}.h, {{.*}}.h):<<16
+; CHECK: = add({{.*}}.h,{{.*}}.h):<<16
declare i32 @llvm.hexagon.A2.addh.h16.sat.ll(i32, i32)
define i32 @A2_addh_h16_sat_ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.h16.sat.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}.l, {{.*}}.l):sat:<<16
+; CHECK: = add({{.*}}.l,{{.*}}.l):sat:<<16
declare i32 @llvm.hexagon.A2.addh.h16.sat.lh(i32, i32)
define i32 @A2_addh_h16_sat_lh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.h16.sat.lh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}.l, {{.*}}.h):sat:<<16
+; CHECK: = add({{.*}}.l,{{.*}}.h):sat:<<16
declare i32 @llvm.hexagon.A2.addh.h16.sat.hl(i32, i32)
define i32 @A2_addh_h16_sat_hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.h16.sat.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}.h, {{.*}}.l):sat:<<16
+; CHECK: = add({{.*}}.h,{{.*}}.l):sat:<<16
declare i32 @llvm.hexagon.A2.addh.h16.sat.hh(i32, i32)
define i32 @A2_addh_h16_sat_hh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.h16.sat.hh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}.h, {{.*}}.h):sat:<<16
+; CHECK: = add({{.*}}.h,{{.*}}.h):sat:<<16
; Logical doublewords
declare i64 @llvm.hexagon.A2.notp(i64)
@@ -184,35 +184,35 @@ define i64 @A2_andp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.andp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = and({{.*}}, {{.*}})
+; CHECK: = and({{.*}},{{.*}})
declare i64 @llvm.hexagon.A4.andnp(i64, i64)
define i64 @A2_andnp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A4.andnp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = and({{.*}}, ~{{.*}})
+; CHECK: = and({{.*}},~{{.*}})
declare i64 @llvm.hexagon.A2.orp(i64, i64)
define i64 @A2_orp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.orp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = or({{.*}}, {{.*}})
+; CHECK: = or({{.*}},{{.*}})
declare i64 @llvm.hexagon.A4.ornp(i64, i64)
define i64 @A2_ornp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A4.ornp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = or({{.*}}, ~{{.*}})
+; CHECK: = or({{.*}},~{{.*}})
declare i64 @llvm.hexagon.A2.xorp(i64, i64)
define i64 @A2_xorp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.xorp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = xor({{.*}}, {{.*}})
+; CHECK: = xor({{.*}},{{.*}})
; Logical-logical doublewords
declare i64 @llvm.hexagon.M4.xor.xacc(i64, i64, i64)
@@ -220,7 +220,7 @@ define i64 @M4_xor_xacc(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M4.xor.xacc(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: ^= xor({{.*}}, {{.*}})
+; CHECK: ^= xor({{.*}},{{.*}})
; Logical-logical words
declare i32 @llvm.hexagon.S4.or.andi(i32, i32, i32)
@@ -228,91 +228,91 @@ define i32 @S4_or_andi(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S4.or.andi(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: |= and({{.*}}, #0)
+; CHECK: |= and({{.*}},#0)
declare i32 @llvm.hexagon.S4.or.andix(i32, i32, i32)
define i32 @S4_or_andix(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S4.or.andix(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: = or({{.*}}, and({{.*}}, #0))
+; CHECK: = or({{.*}},and({{.*}},#0))
declare i32 @llvm.hexagon.M4.or.andn(i32, i32, i32)
define i32 @M4_or_andn(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.or.andn(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: |= and({{.*}}, ~{{.*}})
+; CHECK: |= and({{.*}},~{{.*}})
declare i32 @llvm.hexagon.M4.and.andn(i32, i32, i32)
define i32 @M4_and_andn(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.and.andn(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: &= and({{.*}}, ~{{.*}})
+; CHECK: &= and({{.*}},~{{.*}})
declare i32 @llvm.hexagon.M4.xor.andn(i32, i32, i32)
define i32 @M4_xor_andn(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.xor.andn(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: ^= and({{.*}}, ~{{.*}})
+; CHECK: ^= and({{.*}},~{{.*}})
declare i32 @llvm.hexagon.M4.and.and(i32, i32, i32)
define i32 @M4_and_and(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.and.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: &= and({{.*}}, {{.*}})
+; CHECK: &= and({{.*}},{{.*}})
declare i32 @llvm.hexagon.M4.and.or(i32, i32, i32)
define i32 @M4_and_or(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.and.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: &= or({{.*}}, {{.*}})
+; CHECK: &= or({{.*}},{{.*}})
declare i32 @llvm.hexagon.M4.and.xor(i32, i32, i32)
define i32 @M4_and_xor(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.and.xor(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: &= xor({{.*}}, {{.*}})
+; CHECK: &= xor({{.*}},{{.*}})
declare i32 @llvm.hexagon.M4.or.and(i32, i32, i32)
define i32 @M4_or_and(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.or.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: |= and({{.*}}, {{.*}})
+; CHECK: |= and({{.*}},{{.*}})
declare i32 @llvm.hexagon.M4.or.or(i32, i32, i32)
define i32 @M4_or_or(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.or.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: |= or({{.*}}, {{.*}})
+; CHECK: |= or({{.*}},{{.*}})
declare i32 @llvm.hexagon.M4.or.xor(i32, i32, i32)
define i32 @M4_or_xor(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.or.xor(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: |= xor({{.*}}, {{.*}})
+; CHECK: |= xor({{.*}},{{.*}})
declare i32 @llvm.hexagon.M4.xor.and(i32, i32, i32)
define i32 @M4_xor_and(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.xor.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: ^= and({{.*}}, {{.*}})
+; CHECK: ^= and({{.*}},{{.*}})
declare i32 @llvm.hexagon.M4.xor.or(i32, i32, i32)
define i32 @M4_xor_or(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.xor.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: ^= or({{.*}}, {{.*}})
+; CHECK: ^= or({{.*}},{{.*}})
; Maximum words
declare i32 @llvm.hexagon.A2.max(i32, i32)
@@ -320,14 +320,14 @@ define i32 @A2_max(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.max(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = max({{.*}}, {{.*}})
+; CHECK: = max({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.maxu(i32, i32)
define i32 @A2_maxu(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.maxu(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = maxu({{.*}}, {{.*}})
+; CHECK: = maxu({{.*}},{{.*}})
; Maximum doublewords
declare i64 @llvm.hexagon.A2.maxp(i64, i64)
@@ -335,14 +335,14 @@ define i64 @A2_maxp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.maxp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = max({{.*}}, {{.*}})
+; CHECK: = max({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.maxup(i64, i64)
define i64 @A2_maxup(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.maxup(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = maxu({{.*}}, {{.*}})
+; CHECK: = maxu({{.*}},{{.*}})
; Minimum words
declare i32 @llvm.hexagon.A2.min(i32, i32)
@@ -350,14 +350,14 @@ define i32 @A2_min(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.min(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = min({{.*}}, {{.*}})
+; CHECK: = min({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.minu(i32, i32)
define i32 @A2_minu(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.minu(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = minu({{.*}}, {{.*}})
+; CHECK: = minu({{.*}},{{.*}})
; Minimum doublewords
declare i64 @llvm.hexagon.A2.minp(i64, i64)
@@ -365,14 +365,14 @@ define i64 @A2_minp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.minp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = min({{.*}}, {{.*}})
+; CHECK: = min({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.minup(i64, i64)
define i64 @A2_minup(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.minup(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = minu({{.*}}, {{.*}})
+; CHECK: = minu({{.*}},{{.*}})
; Modulo wrap
declare i32 @llvm.hexagon.A4.modwrapu(i32, i32)
@@ -380,7 +380,7 @@ define i32 @A4_modwrapu(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.modwrapu(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = modwrap({{.*}}, {{.*}})
+; CHECK: = modwrap({{.*}},{{.*}})
; Negate
declare i64 @llvm.hexagon.A2.negp(i64)
@@ -410,42 +410,42 @@ define i32 @A4_cround_ri(i32 %a) {
%z = call i32 @llvm.hexagon.A4.cround.ri(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = cround({{.*}}, #0)
+; CHECK: = cround({{.*}},#0)
declare i32 @llvm.hexagon.A4.round.ri(i32, i32)
define i32 @A4_round_ri(i32 %a) {
%z = call i32 @llvm.hexagon.A4.round.ri(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = round({{.*}}, #0)
+; CHECK: = round({{.*}},#0)
declare i32 @llvm.hexagon.A4.round.ri.sat(i32, i32)
define i32 @A4_round_ri_sat(i32 %a) {
%z = call i32 @llvm.hexagon.A4.round.ri.sat(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = round({{.*}}, #0):sat
+; CHECK: = round({{.*}},#0):sat
declare i32 @llvm.hexagon.A4.cround.rr(i32, i32)
define i32 @A4_cround_rr(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.cround.rr(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cround({{.*}}, {{.*}})
+; CHECK: = cround({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.round.rr(i32, i32)
define i32 @A4_round_rr(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.round.rr(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = round({{.*}}, {{.*}})
+; CHECK: = round({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.round.rr.sat(i32, i32)
define i32 @A4_round_rr_sat(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.round.rr.sat(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = round({{.*}}, {{.*}}):sat
+; CHECK: = round({{.*}},{{.*}}):sat
; Subtract doublewords
declare i64 @llvm.hexagon.A2.subp(i64, i64)
@@ -453,7 +453,7 @@ define i64 @A2_subp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.subp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = sub({{.*}}, {{.*}})
+; CHECK: = sub({{.*}},{{.*}})
; Subtract and accumulate
declare i32 @llvm.hexagon.M2.subacc(i32, i32, i32)
@@ -461,7 +461,7 @@ define i32 @M2_subacc(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.subacc(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += sub({{.*}}, {{.*}})
+; CHECK: += sub({{.*}},{{.*}})
; Subtract halfwords
declare i32 @llvm.hexagon.A2.subh.l16.ll(i32, i32)
@@ -469,84 +469,84 @@ define i32 @A2_subh_l16_ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.l16.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}.l, {{.*}}.l)
+; CHECK: = sub({{.*}}.l,{{.*}}.l)
declare i32 @llvm.hexagon.A2.subh.l16.hl(i32, i32)
define i32 @A2_subh_l16_hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.l16.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}.l, {{.*}}.h)
+; CHECK: = sub({{.*}}.l,{{.*}}.h)
declare i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32, i32)
define i32 @A2_subh_l16_sat.ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}.l, {{.*}}.l):sat
+; CHECK: = sub({{.*}}.l,{{.*}}.l):sat
declare i32 @llvm.hexagon.A2.subh.l16.sat.hl(i32, i32)
define i32 @A2_subh_l16_sat.hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.l16.sat.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}.l, {{.*}}.h):sat
+; CHECK: = sub({{.*}}.l,{{.*}}.h):sat
declare i32 @llvm.hexagon.A2.subh.h16.ll(i32, i32)
define i32 @A2_subh_h16_ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.h16.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}.l, {{.*}}.l):<<16
+; CHECK: = sub({{.*}}.l,{{.*}}.l):<<16
declare i32 @llvm.hexagon.A2.subh.h16.lh(i32, i32)
define i32 @A2_subh_h16_lh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.h16.lh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}.l, {{.*}}.h):<<16
+; CHECK: = sub({{.*}}.l,{{.*}}.h):<<16
declare i32 @llvm.hexagon.A2.subh.h16.hl(i32, i32)
define i32 @A2_subh_h16_hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.h16.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}.h, {{.*}}.l):<<16
+; CHECK: = sub({{.*}}.h,{{.*}}.l):<<16
declare i32 @llvm.hexagon.A2.subh.h16.hh(i32, i32)
define i32 @A2_subh_h16_hh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.h16.hh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}.h, {{.*}}.h):<<16
+; CHECK: = sub({{.*}}.h,{{.*}}.h):<<16
declare i32 @llvm.hexagon.A2.subh.h16.sat.ll(i32, i32)
define i32 @A2_subh_h16_sat_ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.h16.sat.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}.l, {{.*}}.l):sat:<<16
+; CHECK: = sub({{.*}}.l,{{.*}}.l):sat:<<16
declare i32 @llvm.hexagon.A2.subh.h16.sat.lh(i32, i32)
define i32 @A2_subh_h16_sat_lh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.h16.sat.lh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}.l, {{.*}}.h):sat:<<16
+; CHECK: = sub({{.*}}.l,{{.*}}.h):sat:<<16
declare i32 @llvm.hexagon.A2.subh.h16.sat.hl(i32, i32)
define i32 @A2_subh_h16_sat_hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.h16.sat.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}.h, {{.*}}.l):sat:<<16
+; CHECK: = sub({{.*}}.h,{{.*}}.l):sat:<<16
declare i32 @llvm.hexagon.A2.subh.h16.sat.hh(i32, i32)
define i32 @A2_subh_h16_sat_hh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.h16.sat.hh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}.h, {{.*}}.h):sat:<<16
+; CHECK: = sub({{.*}}.h,{{.*}}.h):sat:<<16
; Sign extend word to doubleword
declare i64 @llvm.hexagon.A2.sxtw(i32)
@@ -592,7 +592,7 @@ define i64 @M2_vabsdiffh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vabsdiffh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vabsdiffh({{.*}}, {{.*}})
+; CHECK: = vabsdiffh({{.*}},{{.*}})
; Vector absolute difference words
declare i64 @llvm.hexagon.M2.vabsdiffw(i64, i64)
@@ -600,7 +600,7 @@ define i64 @M2_vabsdiffw(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vabsdiffw(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vabsdiffw({{.*}}, {{.*}})
+; CHECK: = vabsdiffw({{.*}},{{.*}})
; Vector add halfwords
declare i64 @llvm.hexagon.A2.vaddh(i64, i64)
@@ -608,21 +608,21 @@ define i64 @A2_vaddh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vaddh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vaddh({{.*}}, {{.*}})
+; CHECK: = vaddh({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vaddhs(i64, i64)
define i64 @A2_vaddhs(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vaddhs(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vaddh({{.*}}, {{.*}}):sat
+; CHECK: = vaddh({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.A2.vadduhs(i64, i64)
define i64 @A2_vadduhs(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vadduhs(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vadduh({{.*}}, {{.*}}):sat
+; CHECK: = vadduh({{.*}},{{.*}}):sat
; Vector add halfwords with saturate and pack to unsigned bytes
declare i32 @llvm.hexagon.A5.vaddhubs(i64, i64)
@@ -630,7 +630,7 @@ define i32 @A5_vaddhubs(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A5.vaddhubs(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = vaddhub({{.*}}, {{.*}}):sat
+; CHECK: = vaddhub({{.*}},{{.*}}):sat
; Vector reduce add unsigned bytes
declare i64 @llvm.hexagon.A2.vraddub(i64, i64)
@@ -638,14 +638,14 @@ define i64 @A2_vraddub(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vraddub(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vraddub({{.*}}, {{.*}})
+; CHECK: = vraddub({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vraddub.acc(i64, i64, i64)
define i64 @A2_vraddub_acc(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.A2.vraddub.acc(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vraddub({{.*}}, {{.*}})
+; CHECK: += vraddub({{.*}},{{.*}})
; Vector reduce add halfwords
declare i32 @llvm.hexagon.M2.vradduh(i64, i64)
@@ -653,14 +653,14 @@ define i32 @M2_vradduh(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.M2.vradduh(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = vradduh({{.*}}, {{.*}})
+; CHECK: = vradduh({{.*}},{{.*}})
declare i32 @llvm.hexagon.M2.vraddh(i64, i64)
define i32 @M2_vraddh(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.M2.vraddh(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = vraddh({{.*}}, {{.*}})
+; CHECK: = vraddh({{.*}},{{.*}})
; Vector add bytes
declare i64 @llvm.hexagon.A2.vaddub(i64, i64)
@@ -668,14 +668,14 @@ define i64 @A2_vaddub(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vaddub(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vaddub({{.*}}, {{.*}})
+; CHECK: = vaddub({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vaddubs(i64, i64)
define i64 @A2_vaddubs(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vaddubs(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vaddub({{.*}}, {{.*}}):sat
+; CHECK: = vaddub({{.*}},{{.*}}):sat
; Vector add words
declare i64 @llvm.hexagon.A2.vaddw(i64, i64)
@@ -683,14 +683,14 @@ define i64 @A2_vaddw(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vaddw(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vaddw({{.*}}, {{.*}})
+; CHECK: = vaddw({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vaddws(i64, i64)
define i64 @A2_vaddws(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vaddws(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vaddw({{.*}}, {{.*}}):sat
+; CHECK: = vaddw({{.*}},{{.*}}):sat
; Vector average halfwords
declare i64 @llvm.hexagon.A2.vavgh(i64, i64)
@@ -698,56 +698,56 @@ define i64 @A2_vavgh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavgh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vavgh({{.*}}, {{.*}})
+; CHECK: = vavgh({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vavghr(i64, i64)
define i64 @A2_vavghr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavghr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vavgh({{.*}}, {{.*}}):rnd
+; CHECK: = vavgh({{.*}},{{.*}}):rnd
declare i64 @llvm.hexagon.A2.vavghcr(i64, i64)
define i64 @A2_vavghcr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavghcr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vavgh({{.*}}, {{.*}}):crnd
+; CHECK: = vavgh({{.*}},{{.*}}):crnd
declare i64 @llvm.hexagon.A2.vavguh(i64, i64)
define i64 @A2_vavguh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavguh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vavguh({{.*}}, {{.*}})
+; CHECK: = vavguh({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vavguhr(i64, i64)
define i64 @A2_vavguhr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavguhr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vavguh({{.*}}, {{.*}}):rnd
+; CHECK: = vavguh({{.*}},{{.*}}):rnd
declare i64 @llvm.hexagon.A2.vnavgh(i64, i64)
define i64 @A2_vnavgh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vnavgh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vnavgh({{.*}}, {{.*}})
+; CHECK: = vnavgh({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vnavghr(i64, i64)
define i64 @A2_vnavghr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vnavghr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vnavgh({{.*}}, {{.*}}):rnd
+; CHECK: = vnavgh({{.*}},{{.*}}):rnd
declare i64 @llvm.hexagon.A2.vnavghcr(i64, i64)
define i64 @A2_vnavghcr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vnavghcr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vnavgh({{.*}}, {{.*}}):crnd
+; CHECK: = vnavgh({{.*}},{{.*}}):crnd
; Vector average unsigned bytes
declare i64 @llvm.hexagon.A2.vavgub(i64, i64)
@@ -755,14 +755,14 @@ define i64 @A2_vavgub(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavgub(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: vavgub({{.*}}, {{.*}})
+; CHECK: vavgub({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vavgubr(i64, i64)
define i64 @A2_vavgubr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavgubr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vavgub({{.*}}, {{.*}}):rnd
+; CHECK: = vavgub({{.*}},{{.*}}):rnd
; Vector average words
declare i64 @llvm.hexagon.A2.vavgw(i64, i64)
@@ -770,56 +770,56 @@ define i64 @A2_vavgw(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavgw(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vavgw({{.*}}, {{.*}})
+; CHECK: = vavgw({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vavgwr(i64, i64)
define i64 @A2_vavgwr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavgwr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vavgw({{.*}}, {{.*}}):rnd
+; CHECK: = vavgw({{.*}},{{.*}}):rnd
declare i64 @llvm.hexagon.A2.vavgwcr(i64, i64)
define i64 @A2_vavgwcr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavgwcr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vavgw({{.*}}, {{.*}}):crnd
+; CHECK: = vavgw({{.*}},{{.*}}):crnd
declare i64 @llvm.hexagon.A2.vavguw(i64, i64)
define i64 @A2_vavguw(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavguw(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vavguw({{.*}}, {{.*}})
+; CHECK: = vavguw({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vavguwr(i64, i64)
define i64 @A2_vavguwr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavguwr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vavguw({{.*}}, {{.*}}):rnd
+; CHECK: = vavguw({{.*}},{{.*}}):rnd
declare i64 @llvm.hexagon.A2.vnavgw(i64, i64)
define i64 @A2_vnavgw(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vnavgw(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vnavgw({{.*}}, {{.*}})
+; CHECK: = vnavgw({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vnavgwr(i64, i64)
define i64 @A2_vnavgwr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vnavgwr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vnavgw({{.*}}, {{.*}}):rnd
+; CHECK: = vnavgw({{.*}},{{.*}}):rnd
declare i64 @llvm.hexagon.A2.vnavgwcr(i64, i64)
define i64 @A2_vnavgwcr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vnavgwcr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vnavgw({{.*}}, {{.*}}):crnd
+; CHECK: = vnavgw({{.*}},{{.*}}):crnd
; Vector conditional negate
declare i64 @llvm.hexagon.S2.vcnegh(i64, i32)
@@ -827,14 +827,14 @@ define i64 @S2_vcnegh(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.vcnegh(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vcnegh({{.*}}, {{.*}})
+; CHECK: = vcnegh({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.vrcnegh(i64, i64, i32)
define i64 @S2_vrcnegh(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.vrcnegh(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: += vrcnegh({{.*}}, {{.*}})
+; CHECK: += vrcnegh({{.*}},{{.*}})
; Vector maximum bytes
declare i64 @llvm.hexagon.A2.vmaxub(i64, i64)
@@ -842,14 +842,14 @@ define i64 @A2_vmaxub(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vmaxub(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmaxub({{.*}}, {{.*}})
+; CHECK: = vmaxub({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vmaxb(i64, i64)
define i64 @A2_vmaxb(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vmaxb(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmaxb({{.*}}, {{.*}})
+; CHECK: = vmaxb({{.*}},{{.*}})
; Vector maximum halfwords
declare i64 @llvm.hexagon.A2.vmaxh(i64, i64)
@@ -857,14 +857,14 @@ define i64 @A2_vmaxh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vmaxh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmaxh({{.*}}, {{.*}})
+; CHECK: = vmaxh({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vmaxuh(i64, i64)
define i64 @A2_vmaxuh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vmaxuh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmaxuh({{.*}}, {{.*}})
+; CHECK: = vmaxuh({{.*}},{{.*}})
; Vector reduce maximum halfwords
declare i64 @llvm.hexagon.A4.vrmaxh(i64, i64, i32)
@@ -872,14 +872,14 @@ define i64 @A4_vrmaxh(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.A4.vrmaxh(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: = vrmaxh({{.*}}, {{.*}})
+; CHECK: = vrmaxh({{.*}},{{.*}})
declare i64 @llvm.hexagon.A4.vrmaxuh(i64, i64, i32)
define i64 @A4_vrmaxuh(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.A4.vrmaxuh(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: = vrmaxuh({{.*}}, {{.*}})
+; CHECK: = vrmaxuh({{.*}},{{.*}})
; Vector reduce maximum words
declare i64 @llvm.hexagon.A4.vrmaxw(i64, i64, i32)
@@ -887,14 +887,14 @@ define i64 @A4_vrmaxw(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.A4.vrmaxw(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: = vrmaxw({{.*}}, {{.*}})
+; CHECK: = vrmaxw({{.*}},{{.*}})
declare i64 @llvm.hexagon.A4.vrmaxuw(i64, i64, i32)
define i64 @A4_vrmaxuw(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.A4.vrmaxuw(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: vrmaxuw({{.*}}, {{.*}})
+; CHECK: vrmaxuw({{.*}},{{.*}})
; Vector minimum bytes
declare i64 @llvm.hexagon.A2.vminub(i64, i64)
@@ -902,14 +902,14 @@ define i64 @A2_vminub(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vminub(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vminub({{.*}}, {{.*}})
+; CHECK: = vminub({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vminb(i64, i64)
define i64 @A2_vminb(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vminb(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vminb({{.*}}, {{.*}})
+; CHECK: = vminb({{.*}},{{.*}})
; Vector minimum halfwords
declare i64 @llvm.hexagon.A2.vminh(i64, i64)
@@ -917,14 +917,14 @@ define i64 @A2_vminh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vminh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vminh({{.*}}, {{.*}})
+; CHECK: = vminh({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vminuh(i64, i64)
define i64 @A2_vminuh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vminuh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vminuh({{.*}}, {{.*}})
+; CHECK: = vminuh({{.*}},{{.*}})
; Vector reduce minimum halfwords
declare i64 @llvm.hexagon.A4.vrminh(i64, i64, i32)
@@ -932,14 +932,14 @@ define i64 @A4_vrminh(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.A4.vrminh(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: = vrminh({{.*}}, {{.*}})
+; CHECK: = vrminh({{.*}},{{.*}})
declare i64 @llvm.hexagon.A4.vrminuh(i64, i64, i32)
define i64 @A4_vrminuh(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.A4.vrminuh(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: = vrminuh({{.*}}, {{.*}})
+; CHECK: = vrminuh({{.*}},{{.*}})
; Vector reduce minimum words
declare i64 @llvm.hexagon.A4.vrminw(i64, i64, i32)
@@ -947,14 +947,14 @@ define i64 @A4_vrminw(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.A4.vrminw(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: = vrminw({{.*}}, {{.*}})
+; CHECK: = vrminw({{.*}},{{.*}})
declare i64 @llvm.hexagon.A4.vrminuw(i64, i64, i32)
define i64 @A4_vrminuw(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.A4.vrminuw(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: = vrminuw({{.*}}, {{.*}})
+; CHECK: = vrminuw({{.*}},{{.*}})
; Vector sum of absolute differences unsigned bytes
declare i64 @llvm.hexagon.A2.vrsadub(i64, i64)
@@ -962,14 +962,14 @@ define i64 @A2_vrsadub(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vrsadub(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrsadub({{.*}}, {{.*}})
+; CHECK: = vrsadub({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vrsadub.acc(i64, i64, i64)
define i64 @A2_vrsadub_acc(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.A2.vrsadub.acc(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrsadub({{.*}}, {{.*}})
+; CHECK: += vrsadub({{.*}},{{.*}})
; Vector subtract halfwords
declare i64 @llvm.hexagon.A2.vsubh(i64, i64)
@@ -977,21 +977,21 @@ define i64 @A2_vsubh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vsubh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vsubh({{.*}}, {{.*}})
+; CHECK: = vsubh({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vsubhs(i64, i64)
define i64 @A2_vsubhs(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vsubhs(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vsubh({{.*}}, {{.*}}):sat
+; CHECK: = vsubh({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.A2.vsubuhs(i64, i64)
define i64 @A2_vsubuhs(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vsubuhs(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vsubuh({{.*}}, {{.*}}):sat
+; CHECK: = vsubuh({{.*}},{{.*}}):sat
; Vector subtract bytes
declare i64 @llvm.hexagon.A2.vsubub(i64, i64)
@@ -999,14 +999,14 @@ define i64 @A2_vsubub(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vsubub(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vsubub({{.*}}, {{.*}})
+; CHECK: = vsubub({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vsububs(i64, i64)
define i64 @A2_vsububs(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vsububs(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vsubub({{.*}}, {{.*}}):sat
+; CHECK: = vsubub({{.*}},{{.*}}):sat
; Vector subtract words
declare i64 @llvm.hexagon.A2.vsubw(i64, i64)
@@ -1014,11 +1014,11 @@ define i64 @A2_vsubw(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vsubw(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vsubw({{.*}}, {{.*}})
+; CHECK: = vsubw({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vsubws(i64, i64)
define i64 @A2_vsubws(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vsubws(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vsubw({{.*}}, {{.*}}):sat
+; CHECK: = vsubw({{.*}},{{.*}}):sat
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll b/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll
index e8f83d01820..ec7613e3ef2 100644
--- a/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll
@@ -38,14 +38,14 @@ define i32 @S4_clbpaddi(i64 %a) {
%z = call i32 @llvm.hexagon.S4.clbpaddi(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: = add(clb({{.*}}), #0)
+; CHECK: = add(clb({{.*}}),#0)
declare i32 @llvm.hexagon.S4.clbaddi(i32, i32)
define i32 @S4_clbaddi(i32 %a) {
%z = call i32 @llvm.hexagon.S4.clbaddi(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = add(clb({{.*}}), #0)
+; CHECK: = add(clb({{.*}}),#0)
declare i32 @llvm.hexagon.S2.cl0(i32)
define i32 @S2_cl0(i32 %a) {
@@ -111,56 +111,56 @@ define i64 @S2_extractup(i64 %a) {
%z = call i64 @llvm.hexagon.S2.extractup(i64 %a, i32 0, i32 0)
ret i64 %z
}
-; CHECK: = extractu({{.*}}, #0, #0)
+; CHECK: = extractu({{.*}},#0,#0)
declare i64 @llvm.hexagon.S4.extractp(i64, i32, i32)
define i64 @S2_extractp(i64 %a) {
%z = call i64 @llvm.hexagon.S4.extractp(i64 %a, i32 0, i32 0)
ret i64 %z
}
-; CHECK: = extract({{.*}}, #0, #0)
+; CHECK: = extract({{.*}},#0,#0)
declare i32 @llvm.hexagon.S2.extractu(i32, i32, i32)
define i32 @S2_extractu(i32 %a) {
%z = call i32 @llvm.hexagon.S2.extractu(i32 %a, i32 0, i32 0)
ret i32 %z
}
-; CHECK: = extractu({{.*}}, #0, #0)
+; CHECK: = extractu({{.*}},#0,#0)
declare i32 @llvm.hexagon.S4.extract(i32, i32, i32)
define i32 @S2_extract(i32 %a) {
%z = call i32 @llvm.hexagon.S4.extract(i32 %a, i32 0, i32 0)
ret i32 %z
}
-; CHECK: = extract({{.*}}, #0, #0)
+; CHECK: = extract({{.*}},#0,#0)
declare i64 @llvm.hexagon.S2.extractup.rp(i64, i64)
define i64 @S2_extractup_rp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.extractup.rp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = extractu({{.*}}, {{.*}})
+; CHECK: = extractu({{.*}},{{.*}})
declare i64 @llvm.hexagon.S4.extractp.rp(i64, i64)
define i64 @S4_extractp_rp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.extractp.rp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = extract({{.*}}, {{.*}})
+; CHECK: = extract({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.extractu.rp(i32, i64)
define i32 @S2_extractu_rp(i32 %a, i64 %b) {
%z = call i32 @llvm.hexagon.S2.extractu.rp(i32 %a, i64 %b)
ret i32 %z
}
-; CHECK: = extractu({{.*}}, {{.*}})
+; CHECK: = extractu({{.*}},{{.*}})
declare i32 @llvm.hexagon.S4.extract.rp(i32, i64)
define i32 @S4_extract_rp(i32 %a, i64 %b) {
%z = call i32 @llvm.hexagon.S4.extract.rp(i32 %a, i64 %b)
ret i32 %z
}
-; CHECK: = extract({{.*}}, {{.*}})
+; CHECK: = extract({{.*}},{{.*}})
; Insert bitfield
declare i64 @llvm.hexagon.S2.insertp(i64, i64, i32, i32)
@@ -168,28 +168,28 @@ define i64 @S2_insertp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.insertp(i64 %a, i64 %b, i32 0, i32 0)
ret i64 %z
}
-; CHECK: = insert({{.*}}, #0, #0)
+; CHECK: = insert({{.*}},#0,#0)
declare i32 @llvm.hexagon.S2.insert(i32, i32, i32, i32)
define i32 @S2_insert(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.insert(i32 %a, i32 %b, i32 0, i32 0)
ret i32 %z
}
-; CHECK: = insert({{.*}}, #0, #0)
+; CHECK: = insert({{.*}},#0,#0)
declare i32 @llvm.hexagon.S2.insert.rp(i32, i32, i64)
define i32 @S2_insert_rp(i32 %a, i32 %b, i64 %c) {
%z = call i32 @llvm.hexagon.S2.insert.rp(i32 %a, i32 %b, i64 %c)
ret i32 %z
}
-; CHECK: = insert({{.*}}, {{.*}})
+; CHECK: = insert({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.insertp.rp(i64, i64, i64)
define i64 @S2_insertp_rp(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.S2.insertp.rp(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: = insert({{.*}}, r5:4)
+; CHECK: = insert({{.*}},r5:4)
; Interleave/deinterleave
declare i64 @llvm.hexagon.S2.deinterleave(i64)
@@ -212,7 +212,7 @@ define i64 @S2_lfsp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.lfsp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = lfs({{.*}}, {{.*}})
+; CHECK: = lfs({{.*}},{{.*}})
; Masked parity
declare i32 @llvm.hexagon.S2.parityp(i64, i64)
@@ -220,14 +220,14 @@ define i32 @S2_parityp(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.S2.parityp(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = parity({{.*}}, {{.*}})
+; CHECK: = parity({{.*}},{{.*}})
declare i32 @llvm.hexagon.S4.parity(i32, i32)
define i32 @S4_parity(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S4.parity(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = parity({{.*}}, {{.*}})
+; CHECK: = parity({{.*}},{{.*}})
; Bit reverse
declare i64 @llvm.hexagon.S2.brevp(i64)
@@ -250,42 +250,42 @@ define i32 @S2_setbit_i(i32 %a) {
%z = call i32 @llvm.hexagon.S2.setbit.i(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = setbit({{.*}}, #0)
+; CHECK: = setbit({{.*}},#0)
declare i32 @llvm.hexagon.S2.clrbit.i(i32, i32)
define i32 @S2_clrbit_i(i32 %a) {
%z = call i32 @llvm.hexagon.S2.clrbit.i(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = clrbit({{.*}}, #0)
+; CHECK: = clrbit({{.*}},#0)
declare i32 @llvm.hexagon.S2.togglebit.i(i32, i32)
define i32 @S2_togglebit_i(i32 %a) {
%z = call i32 @llvm.hexagon.S2.togglebit.i(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = togglebit({{.*}}, #0)
+; CHECK: = togglebit({{.*}},#0)
declare i32 @llvm.hexagon.S2.setbit.r(i32, i32)
define i32 @S2_setbit_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.setbit.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = setbit({{.*}}, {{.*}})
+; CHECK: = setbit({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.clrbit.r(i32, i32)
define i32 @S2_clrbit_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.clrbit.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = clrbit({{.*}}, {{.*}})
+; CHECK: = clrbit({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.togglebit.r(i32, i32)
define i32 @S2_togglebit_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.togglebit.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = togglebit({{.*}}, {{.*}})
+; CHECK: = togglebit({{.*}},{{.*}})
; Split bitfield
declare i64 @llvm.hexagon.A4.bitspliti(i32, i32)
@@ -293,14 +293,14 @@ define i64 @A4_bitspliti(i32 %a) {
%z = call i64 @llvm.hexagon.A4.bitspliti(i32 %a, i32 0)
ret i64 %z
}
-; CHECK: = bitsplit({{.*}}, #0)
+; CHECK: = bitsplit({{.*}},#0)
declare i64 @llvm.hexagon.A4.bitsplit(i32, i32)
define i64 @A4_bitsplit(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.A4.bitsplit(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = bitsplit({{.*}}, {{.*}})
+; CHECK: = bitsplit({{.*}},{{.*}})
; Table index
declare i32 @llvm.hexagon.S2.tableidxb.goodsyntax(i32, i32, i32, i32)
@@ -308,25 +308,25 @@ define i32 @S2_tableidxb_goodsyntax(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.tableidxb.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
ret i32 %z
}
-; CHECK: = tableidxb({{.*}}, #0, #0)
+; CHECK: = tableidxb({{.*}},#0,#0)
declare i32 @llvm.hexagon.S2.tableidxh.goodsyntax(i32, i32, i32, i32)
define i32 @S2_tableidxh_goodsyntax(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.tableidxh.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
ret i32 %z
}
-; CHECK: = tableidxh({{.*}}, #0, #-1)
+; CHECK: = tableidxh({{.*}},#0,#-1)
declare i32 @llvm.hexagon.S2.tableidxw.goodsyntax(i32, i32, i32, i32)
define i32 @S2_tableidxw_goodsyntax(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.tableidxw.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
ret i32 %z
}
-; CHECK: = tableidxw({{.*}}, #0, #-2)
+; CHECK: = tableidxw({{.*}},#0,#-2)
declare i32 @llvm.hexagon.S2.tableidxd.goodsyntax(i32, i32, i32, i32)
define i32 @S2_tableidxd_goodsyntax(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.tableidxd.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
ret i32 %z
}
-; CHECK: = tableidxd({{.*}}, #0, #-3)
+; CHECK: = tableidxd({{.*}},#0,#-3)
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll b/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll
index 0087883573e..254b928aa98 100644
--- a/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll
@@ -10,28 +10,28 @@ define i64 @S4_vxaddsubh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.vxaddsubh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vxaddsubh({{.*}}, {{.*}}):sat
+; CHECK: = vxaddsubh({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.S4.vxsubaddh(i64, i64)
define i64 @S4_vxsubaddh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.vxsubaddh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vxsubaddh({{.*}}, {{.*}}):sat
+; CHECK: = vxsubaddh({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.S4.vxaddsubhr(i64, i64)
define i64 @S4_vxaddsubhr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.vxaddsubhr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vxaddsubh({{.*}}, {{.*}}):rnd:>>1:sat
+; CHECK: = vxaddsubh({{.*}},{{.*}}):rnd:>>1:sat
declare i64 @llvm.hexagon.S4.vxsubaddhr(i64, i64)
define i64 @S4_vxsubaddhr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.vxsubaddhr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vxsubaddh({{.*}}, {{.*}}):rnd:>>1:sat
+; CHECK: = vxsubaddh({{.*}},{{.*}}):rnd:>>1:sat
; Complex add/sub words
declare i64 @llvm.hexagon.S4.vxaddsubw(i64, i64)
@@ -39,14 +39,14 @@ define i64 @S4_vxaddsubw(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.vxaddsubw(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vxaddsubw({{.*}}, {{.*}}):sat
+; CHECK: = vxaddsubw({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.S4.vxsubaddw(i64, i64)
define i64 @S4_vxsubaddw(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.vxsubaddw(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vxsubaddw({{.*}}, {{.*}}):sat
+; CHECK: = vxsubaddw({{.*}},{{.*}}):sat
; Complex multiply
declare i64 @llvm.hexagon.M2.cmpys.s0(i32, i32)
@@ -54,84 +54,84 @@ define i64 @M2_cmpys_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.cmpys.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = cmpy({{.*}}, {{.*}}):sat
+; CHECK: = cmpy({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.cmpys.s1(i32, i32)
define i64 @M2_cmpys_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.cmpys.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = cmpy({{.*}}, {{.*}}):<<1:sat
+; CHECK: = cmpy({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.cmpysc.s0(i32, i32)
define i64 @M2_cmpysc_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.cmpysc.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = cmpy({{.*}}, {{.*}}*):sat
+; CHECK: = cmpy({{.*}},{{.*}}*):sat
declare i64 @llvm.hexagon.M2.cmpysc.s1(i32, i32)
define i64 @M2_cmpysc_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.cmpysc.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = cmpy({{.*}}, {{.*}}*):<<1:sat
+; CHECK: = cmpy({{.*}},{{.*}}*):<<1:sat
declare i64 @llvm.hexagon.M2.cmacs.s0(i64, i32, i32)
define i64 @M2_cmacs_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cmacs.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += cmpy({{.*}}, {{.*}}):sat
+; CHECK: += cmpy({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.cmacs.s1(i64, i32, i32)
define i64 @M2_cmacs_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cmacs.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += cmpy({{.*}}, {{.*}}):<<1:sat
+; CHECK: += cmpy({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.cnacs.s0(i64, i32, i32)
define i64 @M2_cnacs_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cnacs.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= cmpy({{.*}}, {{.*}}):sat
+; CHECK: -= cmpy({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.cnacs.s1(i64, i32, i32)
define i64 @M2_cnacs_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cnacs.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= cmpy({{.*}}, {{.*}}):<<1:sat
+; CHECK: -= cmpy({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.cmacsc.s0(i64, i32, i32)
define i64 @M2_cmacsc_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cmacsc.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += cmpy({{.*}}, {{.*}}*):sat
+; CHECK: += cmpy({{.*}},{{.*}}*):sat
declare i64 @llvm.hexagon.M2.cmacsc.s1(i64, i32, i32)
define i64 @M2_cmacsc_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cmacsc.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += cmpy({{.*}}, {{.*}}*):<<1:sat
+; CHECK: += cmpy({{.*}},{{.*}}*):<<1:sat
declare i64 @llvm.hexagon.M2.cnacsc.s0(i64, i32, i32)
define i64 @M2_cnacsc_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cnacsc.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= cmpy({{.*}}, {{.*}}*):sat
+; CHECK: -= cmpy({{.*}},{{.*}}*):sat
declare i64 @llvm.hexagon.M2.cnacsc.s1(i64, i32, i32)
define i64 @M2_cnacsc_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cnacsc.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= cmpy({{.*}}, {{.*}}*):<<1:sat
+; CHECK: -= cmpy({{.*}},{{.*}}*):<<1:sat
; Complex multiply real or imaginary
declare i64 @llvm.hexagon.M2.cmpyi.s0(i32, i32)
@@ -139,28 +139,28 @@ define i64 @M2_cmpyi_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.cmpyi.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = cmpyi({{.*}}, {{.*}})
+; CHECK: = cmpyi({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.cmpyr.s0(i32, i32)
define i64 @M2_cmpyr_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.cmpyr.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = cmpyr({{.*}}, {{.*}})
+; CHECK: = cmpyr({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.cmaci.s0(i64, i32, i32)
define i64 @M2_cmaci_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cmaci.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += cmpyi({{.*}}, {{.*}})
+; CHECK: += cmpyi({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.cmacr.s0(i64, i32, i32)
define i64 @M2_cmacr_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cmacr.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += cmpyr({{.*}}, {{.*}})
+; CHECK: += cmpyr({{.*}},{{.*}})
; Complex multiply with round and pack
declare i32 @llvm.hexagon.M2.cmpyrs.s0(i32, i32)
@@ -168,28 +168,28 @@ define i32 @M2_cmpyrs_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.cmpyrs.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmpy({{.*}}, {{.*}}):rnd:sat
+; CHECK: = cmpy({{.*}},{{.*}}):rnd:sat
declare i32 @llvm.hexagon.M2.cmpyrs.s1(i32, i32)
define i32 @M2_cmpyrs_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.cmpyrs.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmpy({{.*}}, {{.*}}):<<1:rnd:sat
+; CHECK: = cmpy({{.*}},{{.*}}):<<1:rnd:sat
declare i32 @llvm.hexagon.M2.cmpyrsc.s0(i32, i32)
define i32 @M2_cmpyrsc_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.cmpyrsc.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmpy({{.*}}, {{.*}}*):rnd:sat
+; CHECK: = cmpy({{.*}},{{.*}}*):rnd:sat
declare i32 @llvm.hexagon.M2.cmpyrsc.s1(i32, i32)
define i32 @M2_cmpyrsc_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.cmpyrsc.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmpy({{.*}}, {{.*}}*):<<1:rnd:sat
+; CHECK: = cmpy({{.*}},{{.*}}*):<<1:rnd:sat
; Complex multiply 32x16
declare i32 @llvm.hexagon.M4.cmpyi.wh(i64, i32)
@@ -197,28 +197,28 @@ define i32 @M4_cmpyi_wh(i64 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.cmpyi.wh(i64 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmpyiwh({{.*}}, {{.*}}):<<1:rnd:sat
+; CHECK: = cmpyiwh({{.*}},{{.*}}):<<1:rnd:sat
declare i32 @llvm.hexagon.M4.cmpyi.whc(i64, i32)
define i32 @M4_cmpyi_whc(i64 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.cmpyi.whc(i64 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmpyiwh({{.*}}, {{.*}}*):<<1:rnd:sat
+; CHECK: = cmpyiwh({{.*}},{{.*}}*):<<1:rnd:sat
declare i32 @llvm.hexagon.M4.cmpyr.wh(i64, i32)
define i32 @M4_cmpyr_wh(i64 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.cmpyr.wh(i64 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmpyrwh({{.*}}, {{.*}}):<<1:rnd:sat
+; CHECK: = cmpyrwh({{.*}},{{.*}}):<<1:rnd:sat
declare i32 @llvm.hexagon.M4.cmpyr.whc(i64, i32)
define i32 @M4_cmpyr_whc(i64 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.cmpyr.whc(i64 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmpyrwh({{.*}}, {{.*}}*):<<1:rnd:sat
+; CHECK: = cmpyrwh({{.*}},{{.*}}*):<<1:rnd:sat
; Vector complex multiply real or imaginary
declare i64 @llvm.hexagon.M2.vcmpy.s0.sat.r(i64, i64)
@@ -226,42 +226,42 @@ define i64 @M2_vcmpy_s0_sat_r(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vcmpy.s0.sat.r(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vcmpyr({{.*}}, {{.*}}):sat
+; CHECK: = vcmpyr({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.vcmpy.s1.sat.r(i64, i64)
define i64 @M2_vcmpy_s1_sat_r(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vcmpy.s1.sat.r(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vcmpyr({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vcmpyr({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.vcmpy.s0.sat.i(i64, i64)
define i64 @M2_vcmpy_s0_sat_i(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vcmpy.s0.sat.i(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vcmpyi({{.*}}, {{.*}}):sat
+; CHECK: = vcmpyi({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.vcmpy.s1.sat.i(i64, i64)
define i64 @M2_vcmpy_s1_sat_i(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vcmpy.s1.sat.i(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vcmpyi({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vcmpyi({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.vcmac.s0.sat.r(i64, i64, i64)
define i64 @M2_vcmac_s0_sat_r(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vcmac.s0.sat.r(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vcmpyr({{.*}}, r5:4):sat
+; CHECK: += vcmpyr({{.*}},r5:4):sat
declare i64 @llvm.hexagon.M2.vcmac.s0.sat.i(i64, i64, i64)
define i64 @M2_vcmac_s0_sat_i(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vcmac.s0.sat.i(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vcmpyi({{.*}}, r5:4):sat
+; CHECK: += vcmpyi({{.*}},r5:4):sat
; Vector complex conjugate
declare i64 @llvm.hexagon.A2.vconj(i64)
@@ -277,7 +277,7 @@ define i64 @S2_vcrotate(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.vcrotate(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vcrotate({{.*}}, {{.*}})
+; CHECK: = vcrotate({{.*}},{{.*}})
; Vector reduce complex multiply real or imaginary
declare i64 @llvm.hexagon.M2.vrcmpyi.s0(i64, i64)
@@ -285,56 +285,56 @@ define i64 @M2_vrcmpyi_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vrcmpyi.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrcmpyi({{.*}}, {{.*}})
+; CHECK: = vrcmpyi({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.vrcmpyr.s0(i64, i64)
define i64 @M2_vrcmpyr_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vrcmpyr.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrcmpyr({{.*}}, {{.*}})
+; CHECK: = vrcmpyr({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.vrcmpyi.s0c(i64, i64)
define i64 @M2_vrcmpyi_s0c(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vrcmpyi.s0c(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrcmpyi({{.*}}, {{.*}}*)
+; CHECK: = vrcmpyi({{.*}},{{.*}}*)
declare i64 @llvm.hexagon.M2.vrcmpyr.s0c(i64, i64)
define i64 @M2_vrcmpyr_s0c(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vrcmpyr.s0c(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrcmpyr({{.*}}, {{.*}}*)
+; CHECK: = vrcmpyr({{.*}},{{.*}}*)
declare i64 @llvm.hexagon.M2.vrcmaci.s0(i64, i64, i64)
define i64 @M2_vrcmaci_s0(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vrcmaci.s0(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrcmpyi({{.*}}, r5:4)
+; CHECK: += vrcmpyi({{.*}},r5:4)
declare i64 @llvm.hexagon.M2.vrcmacr.s0(i64, i64, i64)
define i64 @M2_vrcmacr_s0(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vrcmacr.s0(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrcmpyr({{.*}}, r5:4)
+; CHECK: += vrcmpyr({{.*}},r5:4)
declare i64 @llvm.hexagon.M2.vrcmaci.s0c(i64, i64, i64)
define i64 @M2_vrcmaci_s0c(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vrcmaci.s0c(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrcmpyi({{.*}}, r5:4*)
+; CHECK: += vrcmpyi({{.*}},r5:4*)
declare i64 @llvm.hexagon.M2.vrcmacr.s0c(i64, i64, i64)
define i64 @M2_vrcmacr_s0c(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vrcmacr.s0c(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrcmpyr({{.*}}, r5:4*)
+; CHECK: += vrcmpyr({{.*}},r5:4*)
; Vector reduce complex rotate
declare i64 @llvm.hexagon.S4.vrcrotate(i64, i32, i32)
@@ -342,11 +342,11 @@ define i64 @S4_vrcrotate(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S4.vrcrotate(i64 %a, i32 %b, i32 0)
ret i64 %z
}
-; CHECK: = vrcrotate({{.*}}, {{.*}}, #0)
+; CHECK: = vrcrotate({{.*}},{{.*}},#0)
declare i64 @llvm.hexagon.S4.vrcrotate.acc(i64, i64, i32, i32)
define i64 @S4_vrcrotate_acc(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S4.vrcrotate.acc(i64 %a, i64 %b, i32 %c, i32 0)
ret i64 %z
}
-; CHECK: += vrcrotate({{.*}}, {{.*}}, #0)
+; CHECK: += vrcrotate({{.*}},{{.*}},#0)
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll b/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll
index 598d0a83206..ee56e905162 100644
--- a/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll
@@ -11,7 +11,7 @@ define float @F2_sfadd(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sfadd(float %a, float %b)
ret float %z
}
-; CHECK: = sfadd({{.*}}, {{.*}})
+; CHECK: = sfadd({{.*}},{{.*}})
; Classify floating-point value
declare i32 @llvm.hexagon.F2.sfclass(float, i32)
@@ -19,14 +19,14 @@ define i32 @F2_sfclass(float %a) {
%z = call i32 @llvm.hexagon.F2.sfclass(float %a, i32 0)
ret i32 %z
}
-; CHECK: = sfclass({{.*}}, #0)
+; CHECK: = sfclass({{.*}},#0)
declare i32 @llvm.hexagon.F2.dfclass(double, i32)
define i32 @F2_dfclass(double %a) {
%z = call i32 @llvm.hexagon.F2.dfclass(double %a, i32 0)
ret i32 %z
}
-; CHECK: = dfclass({{.*}}, #0)
+; CHECK: = dfclass({{.*}},#0)
; Compare floating-point value
declare i32 @llvm.hexagon.F2.sfcmpge(float, float)
@@ -34,56 +34,56 @@ define i32 @F2_sfcmpge(float %a, float %b) {
%z = call i32 @llvm.hexagon.F2.sfcmpge(float %a, float %b)
ret i32 %z
}
-; CHECK: = sfcmp.ge({{.*}}, {{.*}})
+; CHECK: = sfcmp.ge({{.*}},{{.*}})
declare i32 @llvm.hexagon.F2.sfcmpuo(float, float)
define i32 @F2_sfcmpuo(float %a, float %b) {
%z = call i32 @llvm.hexagon.F2.sfcmpuo(float %a, float %b)
ret i32 %z
}
-; CHECK: = sfcmp.uo({{.*}}, {{.*}})
+; CHECK: = sfcmp.uo({{.*}},{{.*}})
declare i32 @llvm.hexagon.F2.sfcmpeq(float, float)
define i32 @F2_sfcmpeq(float %a, float %b) {
%z = call i32 @llvm.hexagon.F2.sfcmpeq(float %a, float %b)
ret i32 %z
}
-; CHECK: = sfcmp.eq({{.*}}, {{.*}})
+; CHECK: = sfcmp.eq({{.*}},{{.*}})
declare i32 @llvm.hexagon.F2.sfcmpgt(float, float)
define i32 @F2_sfcmpgt(float %a, float %b) {
%z = call i32 @llvm.hexagon.F2.sfcmpgt(float %a, float %b)
ret i32 %z
}
-; CHECK: = sfcmp.gt({{.*}}, {{.*}})
+; CHECK: = sfcmp.gt({{.*}},{{.*}})
declare i32 @llvm.hexagon.F2.dfcmpge(double, double)
define i32 @F2_dfcmpge(double %a, double %b) {
%z = call i32 @llvm.hexagon.F2.dfcmpge(double %a, double %b)
ret i32 %z
}
-; CHECK: = dfcmp.ge({{.*}}, {{.*}})
+; CHECK: = dfcmp.ge({{.*}},{{.*}})
declare i32 @llvm.hexagon.F2.dfcmpuo(double, double)
define i32 @F2_dfcmpuo(double %a, double %b) {
%z = call i32 @llvm.hexagon.F2.dfcmpuo(double %a, double %b)
ret i32 %z
}
-; CHECK: = dfcmp.uo({{.*}}, {{.*}})
+; CHECK: = dfcmp.uo({{.*}},{{.*}})
declare i32 @llvm.hexagon.F2.dfcmpeq(double, double)
define i32 @F2_dfcmpeq(double %a, double %b) {
%z = call i32 @llvm.hexagon.F2.dfcmpeq(double %a, double %b)
ret i32 %z
}
-; CHECK: = dfcmp.eq({{.*}}, {{.*}})
+; CHECK: = dfcmp.eq({{.*}},{{.*}})
declare i32 @llvm.hexagon.F2.dfcmpgt(double, double)
define i32 @F2_dfcmpgt(double %a, double %b) {
%z = call i32 @llvm.hexagon.F2.dfcmpgt(double %a, double %b)
ret i32 %z
}
-; CHECK: = dfcmp.gt({{.*}}, {{.*}})
+; CHECK: = dfcmp.gt({{.*}},{{.*}})
; Convert floating-point value to other format
declare double @llvm.hexagon.F2.conv.sf2df(float)
@@ -283,14 +283,14 @@ define float @F2_sffixupn(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sffixupn(float %a, float %b)
ret float %z
}
-; CHECK: = sffixupn({{.*}}, {{.*}})
+; CHECK: = sffixupn({{.*}},{{.*}})
declare float @llvm.hexagon.F2.sffixupd(float, float)
define float @F2_sffixupd(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sffixupd(float %a, float %b)
ret float %z
}
-; CHECK: = sffixupd({{.*}}, {{.*}})
+; CHECK: = sffixupd({{.*}},{{.*}})
; Floating point fused multiply-add
declare float @llvm.hexagon.F2.sffma(float, float, float)
@@ -298,14 +298,14 @@ define float @F2_sffma(float %a, float %b, float %c) {
%z = call float @llvm.hexagon.F2.sffma(float %a, float %b, float %c)
ret float %z
}
-; CHECK: += sfmpy({{.*}}, {{.*}})
+; CHECK: += sfmpy({{.*}},{{.*}})
declare float @llvm.hexagon.F2.sffms(float, float, float)
define float @F2_sffms(float %a, float %b, float %c) {
%z = call float @llvm.hexagon.F2.sffms(float %a, float %b, float %c)
ret float %z
}
-; CHECK: -= sfmpy({{.*}}, {{.*}})
+; CHECK: -= sfmpy({{.*}},{{.*}})
; Floating point fused multiply-add with scaling
declare float @llvm.hexagon.F2.sffma.sc(float, float, float, i32)
@@ -313,7 +313,7 @@ define float @F2_sffma_sc(float %a, float %b, float %c, i32 %d) {
%z = call float @llvm.hexagon.F2.sffma.sc(float %a, float %b, float %c, i32 %d)
ret float %z
}
-; CHECK: += sfmpy({{.*}}, {{.*}}, {{.*}}):scale
+; CHECK: += sfmpy({{.*}},{{.*}},{{.*}}):scale
; Floating point fused multiply-add for library routines
declare float @llvm.hexagon.F2.sffma.lib(float, float, float)
@@ -321,14 +321,14 @@ define float @F2_sffma_lib(float %a, float %b, float %c) {
%z = call float @llvm.hexagon.F2.sffma.lib(float %a, float %b, float %c)
ret float %z
}
-; CHECK: += sfmpy({{.*}}, {{.*}}):lib
+; CHECK: += sfmpy({{.*}},{{.*}}):lib
declare float @llvm.hexagon.F2.sffms.lib(float, float, float)
define float @F2_sffms_lib(float %a, float %b, float %c) {
%z = call float @llvm.hexagon.F2.sffms.lib(float %a, float %b, float %c)
ret float %z
}
-; CHECK: -= sfmpy({{.*}}, {{.*}}):lib
+; CHECK: -= sfmpy({{.*}},{{.*}}):lib
; Create floating-point constant
declare float @llvm.hexagon.F2.sfimm.p(i32)
@@ -365,7 +365,7 @@ define float @F2_sfmax(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sfmax(float %a, float %b)
ret float %z
}
-; CHECK: = sfmax({{.*}}, {{.*}})
+; CHECK: = sfmax({{.*}},{{.*}})
; Floating point minimum
declare float @llvm.hexagon.F2.sfmin(float, float)
@@ -373,7 +373,7 @@ define float @F2_sfmin(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sfmin(float %a, float %b)
ret float %z
}
-; CHECK: = sfmin({{.*}}, {{.*}})
+; CHECK: = sfmin({{.*}},{{.*}})
; Floating point multiply
declare float @llvm.hexagon.F2.sfmpy(float, float)
@@ -381,7 +381,7 @@ define float @F2_sfmpy(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sfmpy(float %a, float %b)
ret float %z
}
-; CHECK: = sfmpy({{.*}}, {{.*}})
+; CHECK: = sfmpy({{.*}},{{.*}})
; Floating point subtraction
declare float @llvm.hexagon.F2.sfsub(float, float)
@@ -389,4 +389,4 @@ define float @F2_sfsub(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sfsub(float %a, float %b)
ret float %z
}
-; CHECK: = sfsub({{.*}}, {{.*}})
+; CHECK: = sfsub({{.*}},{{.*}})
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll b/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll
index a1490499fbf..4da4a8a6393 100644
--- a/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll
@@ -11,35 +11,35 @@ define i32 @M4_mpyrr_addi(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.mpyrr.addi(i32 0, i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add(#0, mpyi({{.*}}, {{.*}}))
+; CHECK: = add(#0,mpyi({{.*}},{{.*}}))
declare i32 @llvm.hexagon.M4.mpyri.addi(i32, i32, i32)
define i32 @M4_mpyri_addi(i32 %a) {
%z = call i32 @llvm.hexagon.M4.mpyri.addi(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = add(#0, mpyi({{.*}}, #0))
+; CHECK: = add(#0,mpyi({{.*}},#0))
declare i32 @llvm.hexagon.M4.mpyri.addr.u2(i32, i32, i32)
define i32 @M4_mpyri_addr_u2(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.mpyri.addr.u2(i32 %a, i32 0, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}, mpyi(#0, {{.*}}))
+; CHECK: = add({{.*}},mpyi(#0,{{.*}}))
declare i32 @llvm.hexagon.M4.mpyri.addr(i32, i32, i32)
define i32 @M4_mpyri_addr(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.mpyri.addr(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: = add({{.*}}, mpyi({{.*}}, #0))
+; CHECK: = add({{.*}},mpyi({{.*}},#0))
declare i32 @llvm.hexagon.M4.mpyrr.addr(i32, i32, i32)
define i32 @M4_mpyrr_addr(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.mpyrr.addr(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: = add({{.*}}, mpyi({{.*}}, {{.*}}))
+; CHECK: = add({{.*}},mpyi({{.*}},{{.*}}))
; Vector multiply word by signed half (32x16)
declare i64 @llvm.hexagon.M2.mmpyl.s0(i64, i64)
@@ -47,56 +47,56 @@ define i64 @M2_mmpyl_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyl.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyweh({{.*}}, {{.*}}):sat
+; CHECK: = vmpyweh({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.mmpyl.s1(i64, i64)
define i64 @M2_mmpyl_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyl.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyweh({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vmpyweh({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.mmpyh.s0(i64, i64)
define i64 @M2_mmpyh_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyh.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpywoh({{.*}}, {{.*}}):sat
+; CHECK: = vmpywoh({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.mmpyh.s1(i64, i64)
define i64 @M2_mmpyh_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyh.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpywoh({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vmpywoh({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.mmpyl.rs0(i64, i64)
define i64 @M2_mmpyl_rs0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyl.rs0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyweh({{.*}}, {{.*}}):rnd:sat
+; CHECK: = vmpyweh({{.*}},{{.*}}):rnd:sat
declare i64 @llvm.hexagon.M2.mmpyl.rs1(i64, i64)
define i64 @M2_mmpyl_rs1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyl.rs1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyweh({{.*}}, {{.*}}):<<1:rnd:sat
+; CHECK: = vmpyweh({{.*}},{{.*}}):<<1:rnd:sat
declare i64 @llvm.hexagon.M2.mmpyh.rs0(i64, i64)
define i64 @M2_mmpyh_rs0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyh.rs0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpywoh({{.*}}, {{.*}}):rnd:sat
+; CHECK: = vmpywoh({{.*}},{{.*}}):rnd:sat
declare i64 @llvm.hexagon.M2.mmpyh.rs1(i64, i64)
define i64 @M2_mmpyh_rs1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyh.rs1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpywoh({{.*}}, {{.*}}):<<1:rnd:sat
+; CHECK: = vmpywoh({{.*}},{{.*}}):<<1:rnd:sat
; Vector multiply word by unsigned half (32x16)
declare i64 @llvm.hexagon.M2.mmpyul.s0(i64, i64)
@@ -104,56 +104,56 @@ define i64 @M2_mmpyul_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyul.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyweuh({{.*}}, {{.*}}):sat
+; CHECK: = vmpyweuh({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.mmpyul.s1(i64, i64)
define i64 @M2_mmpyul_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyul.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyweuh({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vmpyweuh({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.mmpyuh.s0(i64, i64)
define i64 @M2_mmpyuh_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyuh.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpywouh({{.*}}, {{.*}}):sat
+; CHECK: = vmpywouh({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.mmpyuh.s1(i64, i64)
define i64 @M2_mmpyuh_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyuh.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpywouh({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vmpywouh({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.mmpyul.rs0(i64, i64)
define i64 @M2_mmpyul_rs0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyul.rs0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyweuh({{.*}}, {{.*}}):rnd:sat
+; CHECK: = vmpyweuh({{.*}},{{.*}}):rnd:sat
declare i64 @llvm.hexagon.M2.mmpyul.rs1(i64, i64)
define i64 @M2_mmpyul_rs1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyul.rs1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyweuh({{.*}}, {{.*}}):<<1:rnd:sat
+; CHECK: = vmpyweuh({{.*}},{{.*}}):<<1:rnd:sat
declare i64 @llvm.hexagon.M2.mmpyuh.rs0(i64, i64)
define i64 @M2_mmpyuh_rs0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyuh.rs0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpywouh({{.*}}, {{.*}}):rnd:sat
+; CHECK: = vmpywouh({{.*}},{{.*}}):rnd:sat
declare i64 @llvm.hexagon.M2.mmpyuh.rs1(i64, i64)
define i64 @M2_mmpyuh_rs1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyuh.rs1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpywouh({{.*}}, {{.*}}):<<1:rnd:sat
+; CHECK: = vmpywouh({{.*}},{{.*}}):<<1:rnd:sat
; Multiply signed halfwords
declare i64 @llvm.hexagon.M2.mpyd.ll.s0(i32, i32)
@@ -161,616 +161,616 @@ define i64 @M2_mpyd_ll_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.ll.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l)
+; CHECK: = mpy({{.*}}.l,{{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyd.ll.s1(i32, i32)
define i64 @M2_mpyd_ll_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.ll.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1
+; CHECK: = mpy({{.*}}.l,{{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyd.lh.s0(i32, i32)
define i64 @M2_mpyd_lh_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.lh.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h)
+; CHECK: = mpy({{.*}}.l,{{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyd.lh.s1(i32, i32)
define i64 @M2_mpyd_lh_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.lh.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1
+; CHECK: = mpy({{.*}}.l,{{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyd.hl.s0(i32, i32)
define i64 @M2_mpyd_hl_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.hl.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l)
+; CHECK: = mpy({{.*}}.h,{{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyd.hl.s1(i32, i32)
define i64 @M2_mpyd_hl_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.hl.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1
+; CHECK: = mpy({{.*}}.h,{{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyd.hh.s0(i32, i32)
define i64 @M2_mpyd_hh_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.hh.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h)
+; CHECK: = mpy({{.*}}.h,{{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyd.hh.s1(i32, i32)
define i64 @M2_mpyd_hh_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.hh.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1
+; CHECK: = mpy({{.*}}.h,{{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyd.rnd.ll.s0(i32, i32)
define i64 @M2_mpyd_rnd_ll_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.ll.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l):rnd
+; CHECK: = mpy({{.*}}.l,{{.*}}.l):rnd
declare i64 @llvm.hexagon.M2.mpyd.rnd.ll.s1(i32, i32)
define i64 @M2_mpyd_rnd_ll_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.ll.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1:rnd
+; CHECK: = mpy({{.*}}.l,{{.*}}.l):<<1:rnd
declare i64 @llvm.hexagon.M2.mpyd.rnd.lh.s0(i32, i32)
define i64 @M2_mpyd_rnd_lh_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.lh.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h):rnd
+; CHECK: = mpy({{.*}}.l,{{.*}}.h):rnd
declare i64 @llvm.hexagon.M2.mpyd.rnd.lh.s1(i32, i32)
define i64 @M2_mpyd_rnd_lh_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.lh.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1:rnd
+; CHECK: = mpy({{.*}}.l,{{.*}}.h):<<1:rnd
declare i64 @llvm.hexagon.M2.mpyd.rnd.hl.s0(i32, i32)
define i64 @M2_mpyd_rnd_hl_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.hl.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l):rnd
+; CHECK: = mpy({{.*}}.h,{{.*}}.l):rnd
declare i64 @llvm.hexagon.M2.mpyd.rnd.hl.s1(i32, i32)
define i64 @M2_mpyd_rnd_hl_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.hl.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1:rnd
+; CHECK: = mpy({{.*}}.h,{{.*}}.l):<<1:rnd
declare i64 @llvm.hexagon.M2.mpyd.rnd.hh.s0(i32, i32)
define i64 @M2_mpyd_rnd_hh_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.hh.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h):rnd
+; CHECK: = mpy({{.*}}.h,{{.*}}.h):rnd
declare i64 @llvm.hexagon.M2.mpyd.rnd.hh.s1(i32, i32)
define i64 @M2_mpyd_rnd_hh_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.hh.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1:rnd
+; CHECK: = mpy({{.*}}.h,{{.*}}.h):<<1:rnd
declare i64 @llvm.hexagon.M2.mpyd.acc.ll.s0(i64, i32, i32)
define i64 @M2_mpyd_acc_ll_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.ll.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.l)
+; CHECK: += mpy({{.*}}.l,{{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyd.acc.ll.s1(i64, i32, i32)
define i64 @M2_mpyd_acc_ll_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.ll.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.l):<<1
+; CHECK: += mpy({{.*}}.l,{{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyd.acc.lh.s0(i64, i32, i32)
define i64 @M2_mpyd_acc_lh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.lh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.h)
+; CHECK: += mpy({{.*}}.l,{{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyd.acc.lh.s1(i64, i32, i32)
define i64 @M2_mpyd_acc_lh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.lh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.h):<<1
+; CHECK: += mpy({{.*}}.l,{{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyd.acc.hl.s0(i64, i32, i32)
define i64 @M2_mpyd_acc_hl_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.hl.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.l)
+; CHECK: += mpy({{.*}}.h,{{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyd.acc.hl.s1(i64, i32, i32)
define i64 @M2_mpyd_acc_hl_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.hl.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.l):<<1
+; CHECK: += mpy({{.*}}.h,{{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyd.acc.hh.s0(i64, i32, i32)
define i64 @M2_mpyd_acc_hh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.hh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.h)
+; CHECK: += mpy({{.*}}.h,{{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyd.acc.hh.s1(i64, i32, i32)
define i64 @M2_mpyd_acc_hh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.hh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.h):<<1
+; CHECK: += mpy({{.*}}.h,{{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyd.nac.ll.s0(i64, i32, i32)
define i64 @M2_mpyd_nac_ll_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.ll.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.l)
+; CHECK: -= mpy({{.*}}.l,{{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyd.nac.ll.s1(i64, i32, i32)
define i64 @M2_mpyd_nac_ll_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.ll.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.l):<<1
+; CHECK: -= mpy({{.*}}.l,{{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyd.nac.lh.s0(i64, i32, i32)
define i64 @M2_mpyd_nac_lh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.lh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.h)
+; CHECK: -= mpy({{.*}}.l,{{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyd.nac.lh.s1(i64, i32, i32)
define i64 @M2_mpyd_nac_lh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.lh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.h):<<1
+; CHECK: -= mpy({{.*}}.l,{{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyd.nac.hl.s0(i64, i32, i32)
define i64 @M2_mpyd_nac_hl_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.hl.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.l)
+; CHECK: -= mpy({{.*}}.h,{{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyd.nac.hl.s1(i64, i32, i32)
define i64 @M2_mpyd_nac_hl_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.hl.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.l):<<1
+; CHECK: -= mpy({{.*}}.h,{{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyd.nac.hh.s0(i64, i32, i32)
define i64 @M2_mpyd_nac_hh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.hh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.h)
+; CHECK: -= mpy({{.*}}.h,{{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyd.nac.hh.s1(i64, i32, i32)
define i64 @M2_mpyd_nac_hh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.hh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.h):<<1
+; CHECK: -= mpy({{.*}}.h,{{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpy.ll.s0(i32, i32)
define i32 @M2_mpy_ll_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.ll.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l)
+; CHECK: = mpy({{.*}}.l,{{.*}}.l)
declare i32 @llvm.hexagon.M2.mpy.ll.s1(i32, i32)
define i32 @M2_mpy_ll_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.ll.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1
+; CHECK: = mpy({{.*}}.l,{{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpy.lh.s0(i32, i32)
define i32 @M2_mpy_lh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.lh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h)
+; CHECK: = mpy({{.*}}.l,{{.*}}.h)
declare i32 @llvm.hexagon.M2.mpy.lh.s1(i32, i32)
define i32 @M2_mpy_lh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.lh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1
+; CHECK: = mpy({{.*}}.l,{{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpy.hl.s0(i32, i32)
define i32 @M2_mpy_hl_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.hl.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l)
+; CHECK: = mpy({{.*}}.h,{{.*}}.l)
declare i32 @llvm.hexagon.M2.mpy.hl.s1(i32, i32)
define i32 @M2_mpy_hl_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.hl.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1
+; CHECK: = mpy({{.*}}.h,{{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpy.hh.s0(i32, i32)
define i32 @M2_mpy_hh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.hh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h)
+; CHECK: = mpy({{.*}}.h,{{.*}}.h)
declare i32 @llvm.hexagon.M2.mpy.hh.s1(i32, i32)
define i32 @M2_mpy_hh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.hh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1
+; CHECK: = mpy({{.*}}.h,{{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpy.sat.ll.s0(i32, i32)
define i32 @M2_mpy_sat_ll_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.ll.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l):sat
+; CHECK: = mpy({{.*}}.l,{{.*}}.l):sat
declare i32 @llvm.hexagon.M2.mpy.sat.ll.s1(i32, i32)
define i32 @M2_mpy_sat_ll_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.ll.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1:sat
+; CHECK: = mpy({{.*}}.l,{{.*}}.l):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.sat.lh.s0(i32, i32)
define i32 @M2_mpy_sat_lh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.lh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h):sat
+; CHECK: = mpy({{.*}}.l,{{.*}}.h):sat
declare i32 @llvm.hexagon.M2.mpy.sat.lh.s1(i32, i32)
define i32 @M2_mpy_sat_lh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.lh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1:sat
+; CHECK: = mpy({{.*}}.l,{{.*}}.h):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.sat.hl.s0(i32, i32)
define i32 @M2_mpy_sat_hl_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.hl.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l):sat
+; CHECK: = mpy({{.*}}.h,{{.*}}.l):sat
declare i32 @llvm.hexagon.M2.mpy.sat.hl.s1(i32, i32)
define i32 @M2_mpy_sat_hl_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.hl.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1:sat
+; CHECK: = mpy({{.*}}.h,{{.*}}.l):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.sat.hh.s0(i32, i32)
define i32 @M2_mpy_sat_hh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.hh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h):sat
+; CHECK: = mpy({{.*}}.h,{{.*}}.h):sat
declare i32 @llvm.hexagon.M2.mpy.sat.hh.s1(i32, i32)
define i32 @M2_mpy_sat_hh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.hh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1:sat
+; CHECK: = mpy({{.*}}.h,{{.*}}.h):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s0(i32, i32)
define i32 @M2_mpy_sat_rnd_ll_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l):rnd:sat
+; CHECK: = mpy({{.*}}.l,{{.*}}.l):rnd:sat
declare i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s1(i32, i32)
define i32 @M2_mpy_sat_rnd_ll_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1:rnd:sat
+; CHECK: = mpy({{.*}}.l,{{.*}}.l):<<1:rnd:sat
declare i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s0(i32, i32)
define i32 @M2_mpy_sat_rnd_lh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h):rnd:sat
+; CHECK: = mpy({{.*}}.l,{{.*}}.h):rnd:sat
declare i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s1(i32, i32)
define i32 @M2_mpy_sat_rnd_lh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1:rnd:sat
+; CHECK: = mpy({{.*}}.l,{{.*}}.h):<<1:rnd:sat
declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s0(i32, i32)
define i32 @M2_mpy_sat_rnd_hl_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l):rnd:sat
+; CHECK: = mpy({{.*}}.h,{{.*}}.l):rnd:sat
declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s1(i32, i32)
define i32 @M2_mpy_sat_rnd_hl_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1:rnd:sat
+; CHECK: = mpy({{.*}}.h,{{.*}}.l):<<1:rnd:sat
declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s0(i32, i32)
define i32 @M2_mpy_sat_rnd_hh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h):rnd:sat
+; CHECK: = mpy({{.*}}.h,{{.*}}.h):rnd:sat
declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s1(i32, i32)
define i32 @M2_mpy_sat_rnd_hh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1:rnd:sat
+; CHECK: = mpy({{.*}}.h,{{.*}}.h):<<1:rnd:sat
declare i32 @llvm.hexagon.M2.mpy.acc.ll.s0(i32, i32, i32)
define i32 @M2_mpy_acc_ll_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.ll.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.l)
+; CHECK: += mpy({{.*}}.l,{{.*}}.l)
declare i32 @llvm.hexagon.M2.mpy.acc.ll.s1(i32, i32, i32)
define i32 @M2_mpy_acc_ll_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.ll.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.l):<<1
+; CHECK: += mpy({{.*}}.l,{{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpy.acc.lh.s0(i32, i32, i32)
define i32 @M2_mpy_acc_lh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.lh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.h)
+; CHECK: += mpy({{.*}}.l,{{.*}}.h)
declare i32 @llvm.hexagon.M2.mpy.acc.lh.s1(i32, i32, i32)
define i32 @M2_mpy_acc_lh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.lh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.h):<<1
+; CHECK: += mpy({{.*}}.l,{{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpy.acc.hl.s0(i32, i32, i32)
define i32 @M2_mpy_acc_hl_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.hl.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.l)
+; CHECK: += mpy({{.*}}.h,{{.*}}.l)
declare i32 @llvm.hexagon.M2.mpy.acc.hl.s1(i32, i32, i32)
define i32 @M2_mpy_acc_hl_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.hl.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.l):<<1
+; CHECK: += mpy({{.*}}.h,{{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpy.acc.hh.s0(i32, i32, i32)
define i32 @M2_mpy_acc_hh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.hh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.h)
+; CHECK: += mpy({{.*}}.h,{{.*}}.h)
declare i32 @llvm.hexagon.M2.mpy.acc.hh.s1(i32, i32, i32)
define i32 @M2_mpy_acc_hh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.hh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.h):<<1
+; CHECK: += mpy({{.*}}.h,{{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s0(i32, i32, i32)
define i32 @M2_mpy_acc_sat_ll_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.l):sat
+; CHECK: += mpy({{.*}}.l,{{.*}}.l):sat
declare i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32, i32, i32)
define i32 @M2_mpy_acc_sat_ll_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.l):<<1:sat
+; CHECK: += mpy({{.*}}.l,{{.*}}.l):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s0(i32, i32, i32)
define i32 @M2_mpy_acc_sat_lh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.h):sat
+; CHECK: += mpy({{.*}}.l,{{.*}}.h):sat
declare i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s1(i32, i32, i32)
define i32 @M2_mpy_acc_sat_lh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.h):<<1:sat
+; CHECK: += mpy({{.*}}.l,{{.*}}.h):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s0(i32, i32, i32)
define i32 @M2_mpy_acc_sat_hl_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.l):sat
+; CHECK: += mpy({{.*}}.h,{{.*}}.l):sat
declare i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s1(i32, i32, i32)
define i32 @M2_mpy_acc_sat_hl_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.l):<<1:sat
+; CHECK: += mpy({{.*}}.h,{{.*}}.l):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s0(i32, i32, i32)
define i32 @M2_mpy_acc_sat_hh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.h):sat
+; CHECK: += mpy({{.*}}.h,{{.*}}.h):sat
declare i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s1(i32, i32, i32)
define i32 @M2_mpy_acc_sat_hh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.h):<<1:sat
+; CHECK: += mpy({{.*}}.h,{{.*}}.h):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.nac.ll.s0(i32, i32, i32)
define i32 @M2_mpy_nac_ll_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.ll.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.l)
+; CHECK: -= mpy({{.*}}.l,{{.*}}.l)
declare i32 @llvm.hexagon.M2.mpy.nac.ll.s1(i32, i32, i32)
define i32 @M2_mpy_nac_ll_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.ll.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.l):<<1
+; CHECK: -= mpy({{.*}}.l,{{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpy.nac.lh.s0(i32, i32, i32)
define i32 @M2_mpy_nac_lh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.lh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.h)
+; CHECK: -= mpy({{.*}}.l,{{.*}}.h)
declare i32 @llvm.hexagon.M2.mpy.nac.lh.s1(i32, i32, i32)
define i32 @M2_mpy_nac_lh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.lh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.h):<<1
+; CHECK: -= mpy({{.*}}.l,{{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpy.nac.hl.s0(i32, i32, i32)
define i32 @M2_mpy_nac_hl_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.hl.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.l)
+; CHECK: -= mpy({{.*}}.h,{{.*}}.l)
declare i32 @llvm.hexagon.M2.mpy.nac.hl.s1(i32, i32, i32)
define i32 @M2_mpy_nac_hl_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.hl.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.l):<<1
+; CHECK: -= mpy({{.*}}.h,{{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpy.nac.hh.s0(i32, i32, i32)
define i32 @M2_mpy_nac_hh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.hh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.h)
+; CHECK: -= mpy({{.*}}.h,{{.*}}.h)
declare i32 @llvm.hexagon.M2.mpy.nac.hh.s1(i32, i32, i32)
define i32 @M2_mpy_nac_hh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.hh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.h):<<1
+; CHECK: -= mpy({{.*}}.h,{{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s0(i32, i32, i32)
define i32 @M2_mpy_nac_sat_ll_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.l):sat
+; CHECK: -= mpy({{.*}}.l,{{.*}}.l):sat
declare i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s1(i32, i32, i32)
define i32 @M2_mpy_nac_sat_ll_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.l):<<1:sat
+; CHECK: -= mpy({{.*}}.l,{{.*}}.l):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s0(i32, i32, i32)
define i32 @M2_mpy_nac_sat_lh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.h):sat
+; CHECK: -= mpy({{.*}}.l,{{.*}}.h):sat
declare i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s1(i32, i32, i32)
define i32 @M2_mpy_nac_sat_lh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.h):<<1:sat
+; CHECK: -= mpy({{.*}}.l,{{.*}}.h):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s0(i32, i32, i32)
define i32 @M2_mpy_nac_sat_hl_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.l):sat
+; CHECK: -= mpy({{.*}}.h,{{.*}}.l):sat
declare i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s1(i32, i32, i32)
define i32 @M2_mpy_nac_sat_hl_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.l):<<1:sat
+; CHECK: -= mpy({{.*}}.h,{{.*}}.l):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s0(i32, i32, i32)
define i32 @M2_mpy_nac_sat_hh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.h):sat
+; CHECK: -= mpy({{.*}}.h,{{.*}}.h):sat
declare i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s1(i32, i32, i32)
define i32 @M2_mpy_nac_sat_hh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.h):<<1:sat
+; CHECK: -= mpy({{.*}}.h,{{.*}}.h):<<1:sat
; Multiply unsigned halfwords
declare i64 @llvm.hexagon.M2.mpyud.ll.s0(i32, i32)
@@ -778,336 +778,336 @@ define i64 @M2_mpyud_ll_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.ll.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}.l, {{.*}}.l)
+; CHECK: = mpyu({{.*}}.l,{{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyud.ll.s1(i32, i32)
define i64 @M2_mpyud_ll_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.ll.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}.l, {{.*}}.l):<<1
+; CHECK: = mpyu({{.*}}.l,{{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyud.lh.s0(i32, i32)
define i64 @M2_mpyud_lh_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.lh.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}.l, {{.*}}.h)
+; CHECK: = mpyu({{.*}}.l,{{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyud.lh.s1(i32, i32)
define i64 @M2_mpyud_lh_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.lh.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}.l, {{.*}}.h):<<1
+; CHECK: = mpyu({{.*}}.l,{{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyud.hl.s0(i32, i32)
define i64 @M2_mpyud_hl_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.hl.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}.h, {{.*}}.l)
+; CHECK: = mpyu({{.*}}.h,{{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyud.hl.s1(i32, i32)
define i64 @M2_mpyud_hl_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.hl.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}.h, {{.*}}.l):<<1
+; CHECK: = mpyu({{.*}}.h,{{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyud.hh.s0(i32, i32)
define i64 @M2_mpyud_hh_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.hh.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}.h, {{.*}}.h)
+; CHECK: = mpyu({{.*}}.h,{{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyud.hh.s1(i32, i32)
define i64 @M2_mpyud_hh_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.hh.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}.h, {{.*}}.h):<<1
+; CHECK: = mpyu({{.*}}.h,{{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyud.acc.ll.s0(i64, i32, i32)
define i64 @M2_mpyud_acc_ll_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.ll.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}.l, {{.*}}.l)
+; CHECK: += mpyu({{.*}}.l,{{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyud.acc.ll.s1(i64, i32, i32)
define i64 @M2_mpyud_acc_ll_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.ll.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}.l, {{.*}}.l):<<1
+; CHECK: += mpyu({{.*}}.l,{{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyud.acc.lh.s0(i64, i32, i32)
define i64 @M2_mpyud_acc_lh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.lh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}.l, {{.*}}.h)
+; CHECK: += mpyu({{.*}}.l,{{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyud.acc.lh.s1(i64, i32, i32)
define i64 @M2_mpyud_acc_lh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.lh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}.l, {{.*}}.h):<<1
+; CHECK: += mpyu({{.*}}.l,{{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyud.acc.hl.s0(i64, i32, i32)
define i64 @M2_mpyud_acc_hl_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.hl.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}.h, {{.*}}.l)
+; CHECK: += mpyu({{.*}}.h,{{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyud.acc.hl.s1(i64, i32, i32)
define i64 @M2_mpyud_acc_hl_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.hl.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}.h, {{.*}}.l):<<1
+; CHECK: += mpyu({{.*}}.h,{{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyud.acc.hh.s0(i64, i32, i32)
define i64 @M2_mpyud_acc_hh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.hh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}.h, {{.*}}.h)
+; CHECK: += mpyu({{.*}}.h,{{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyud.acc.hh.s1(i64, i32, i32)
define i64 @M2_mpyud_acc_hh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.hh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}.h, {{.*}}.h):<<1
+; CHECK: += mpyu({{.*}}.h,{{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyud.nac.ll.s0(i64, i32, i32)
define i64 @M2_mpyud_nac_ll_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.ll.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}.l, {{.*}}.l)
+; CHECK: -= mpyu({{.*}}.l,{{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyud.nac.ll.s1(i64, i32, i32)
define i64 @M2_mpyud_nac_ll_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.ll.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}.l, {{.*}}.l):<<1
+; CHECK: -= mpyu({{.*}}.l,{{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyud.nac.lh.s0(i64, i32, i32)
define i64 @M2_mpyud_nac_lh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.lh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}.l, {{.*}}.h)
+; CHECK: -= mpyu({{.*}}.l,{{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyud.nac.lh.s1(i64, i32, i32)
define i64 @M2_mpyud_nac_lh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.lh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}.l, {{.*}}.h):<<1
+; CHECK: -= mpyu({{.*}}.l,{{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyud.nac.hl.s0(i64, i32, i32)
define i64 @M2_mpyud_nac_hl_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.hl.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}.h, {{.*}}.l)
+; CHECK: -= mpyu({{.*}}.h,{{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyud.nac.hl.s1(i64, i32, i32)
define i64 @M2_mpyud_nac_hl_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.hl.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}.h, {{.*}}.l):<<1
+; CHECK: -= mpyu({{.*}}.h,{{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyud.nac.hh.s0(i64, i32, i32)
define i64 @M2_mpyud_nac_hh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.hh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}.h, {{.*}}.h)
+; CHECK: -= mpyu({{.*}}.h,{{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyud.nac.hh.s1(i64, i32, i32)
define i64 @M2_mpyud_nac_hh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.hh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}.h, {{.*}}.h):<<1
+; CHECK: -= mpyu({{.*}}.h,{{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpyu.ll.s0(i32, i32)
define i32 @M2_mpyu_ll_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.ll.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}.l, {{.*}}.l)
+; CHECK: = mpyu({{.*}}.l,{{.*}}.l)
declare i32 @llvm.hexagon.M2.mpyu.ll.s1(i32, i32)
define i32 @M2_mpyu_ll_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.ll.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}.l, {{.*}}.l):<<1
+; CHECK: = mpyu({{.*}}.l,{{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpyu.lh.s0(i32, i32)
define i32 @M2_mpyu_lh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.lh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}.l, {{.*}}.h)
+; CHECK: = mpyu({{.*}}.l,{{.*}}.h)
declare i32 @llvm.hexagon.M2.mpyu.lh.s1(i32, i32)
define i32 @M2_mpyu_lh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.lh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}.l, {{.*}}.h):<<1
+; CHECK: = mpyu({{.*}}.l,{{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpyu.hl.s0(i32, i32)
define i32 @M2_mpyu_hl_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.hl.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}.h, {{.*}}.l)
+; CHECK: = mpyu({{.*}}.h,{{.*}}.l)
declare i32 @llvm.hexagon.M2.mpyu.hl.s1(i32, i32)
define i32 @M2_mpyu_hl_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.hl.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}.h, {{.*}}.l):<<1
+; CHECK: = mpyu({{.*}}.h,{{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpyu.hh.s0(i32, i32)
define i32 @M2_mpyu_hh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.hh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}.h, {{.*}}.h)
+; CHECK: = mpyu({{.*}}.h,{{.*}}.h)
declare i32 @llvm.hexagon.M2.mpyu.hh.s1(i32, i32)
define i32 @M2_mpyu_hh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.hh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}.h, {{.*}}.h):<<1
+; CHECK: = mpyu({{.*}}.h,{{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpyu.acc.ll.s0(i32, i32, i32)
define i32 @M2_mpyu_acc_ll_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.ll.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpyu({{.*}}.l, {{.*}}.l)
+; CHECK: += mpyu({{.*}}.l,{{.*}}.l)
declare i32 @llvm.hexagon.M2.mpyu.acc.ll.s1(i32, i32, i32)
define i32 @M2_mpyu_acc_ll_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.ll.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpyu({{.*}}.l, {{.*}}.l):<<1
+; CHECK: += mpyu({{.*}}.l,{{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpyu.acc.lh.s0(i32, i32, i32)
define i32 @M2_mpyu_acc_lh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.lh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpyu({{.*}}.l, {{.*}}.h)
+; CHECK: += mpyu({{.*}}.l,{{.*}}.h)
declare i32 @llvm.hexagon.M2.mpyu.acc.lh.s1(i32, i32, i32)
define i32 @M2_mpyu_acc_lh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.lh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpyu({{.*}}.l, {{.*}}.h):<<1
+; CHECK: += mpyu({{.*}}.l,{{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpyu.acc.hl.s0(i32, i32, i32)
define i32 @M2_mpyu_acc_hl_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.hl.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpyu({{.*}}.h, {{.*}}.l)
+; CHECK: += mpyu({{.*}}.h,{{.*}}.l)
declare i32 @llvm.hexagon.M2.mpyu.acc.hl.s1(i32, i32, i32)
define i32 @M2_mpyu_acc_hl_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.hl.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpyu({{.*}}.h, {{.*}}.l):<<1
+; CHECK: += mpyu({{.*}}.h,{{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpyu.acc.hh.s0(i32, i32, i32)
define i32 @M2_mpyu_acc_hh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.hh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpyu({{.*}}.h, {{.*}}.h)
+; CHECK: += mpyu({{.*}}.h,{{.*}}.h)
declare i32 @llvm.hexagon.M2.mpyu.acc.hh.s1(i32, i32, i32)
define i32 @M2_mpyu_acc_hh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.hh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpyu({{.*}}.h, {{.*}}.h):<<1
+; CHECK: += mpyu({{.*}}.h,{{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpyu.nac.ll.s0(i32, i32, i32)
define i32 @M2_mpyu_nac_ll_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.ll.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpyu({{.*}}.l, {{.*}}.l)
+; CHECK: -= mpyu({{.*}}.l,{{.*}}.l)
declare i32 @llvm.hexagon.M2.mpyu.nac.ll.s1(i32, i32, i32)
define i32 @M2_mpyu_nac_ll_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.ll.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpyu({{.*}}.l, {{.*}}.l):<<1
+; CHECK: -= mpyu({{.*}}.l,{{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpyu.nac.lh.s0(i32, i32, i32)
define i32 @M2_mpyu_nac_lh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.lh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpyu({{.*}}.l, {{.*}}.h)
+; CHECK: -= mpyu({{.*}}.l,{{.*}}.h)
declare i32 @llvm.hexagon.M2.mpyu.nac.lh.s1(i32, i32, i32)
define i32 @M2_mpyu_nac_lh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.lh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpyu({{.*}}.l, {{.*}}.h):<<1
+; CHECK: -= mpyu({{.*}}.l,{{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpyu.nac.hl.s0(i32, i32, i32)
define i32 @M2_mpyu_nac_hl_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.hl.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpyu({{.*}}.h, {{.*}}.l)
+; CHECK: -= mpyu({{.*}}.h,{{.*}}.l)
declare i32 @llvm.hexagon.M2.mpyu.nac.hl.s1(i32, i32, i32)
define i32 @M2_mpyu_nac_hl_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.hl.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpyu({{.*}}.h, {{.*}}.l):<<1
+; CHECK: -= mpyu({{.*}}.h,{{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpyu.nac.hh.s0(i32, i32, i32)
define i32 @M2_mpyu_nac_hh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.hh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpyu({{.*}}.h, {{.*}}.h)
+; CHECK: -= mpyu({{.*}}.h,{{.*}}.h)
declare i32 @llvm.hexagon.M2.mpyu.nac.hh.s1(i32, i32, i32)
define i32 @M2_mpyu_nac_hh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.hh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpyu({{.*}}.h, {{.*}}.h):<<1
+; CHECK: -= mpyu({{.*}}.h,{{.*}}.h):<<1
; Polynomial multiply words
declare i64 @llvm.hexagon.M4.pmpyw(i32, i32)
@@ -1115,14 +1115,14 @@ define i64 @M4_pmpyw(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M4.pmpyw(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = pmpyw({{.*}}, {{.*}})
+; CHECK: = pmpyw({{.*}},{{.*}})
declare i64 @llvm.hexagon.M4.pmpyw.acc(i64, i32, i32)
define i64 @M4_pmpyw_acc(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M4.pmpyw.acc(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: ^= pmpyw({{.*}}, {{.*}})
+; CHECK: ^= pmpyw({{.*}},{{.*}})
; Vector reduce multiply word by signed half
declare i64 @llvm.hexagon.M4.vrmpyoh.s0(i64, i64)
@@ -1130,56 +1130,56 @@ define i64 @M4_vrmpyoh_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M4.vrmpyoh.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrmpywoh({{.*}}, {{.*}})
+; CHECK: = vrmpywoh({{.*}},{{.*}})
declare i64 @llvm.hexagon.M4.vrmpyoh.s1(i64, i64)
define i64 @M4_vrmpyoh_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M4.vrmpyoh.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrmpywoh({{.*}}, {{.*}}):<<1
+; CHECK: = vrmpywoh({{.*}},{{.*}}):<<1
declare i64 @llvm.hexagon.M4.vrmpyeh.s0(i64, i64)
define i64 @M4_vrmpyeh_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M4.vrmpyeh.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrmpyweh({{.*}}, {{.*}})
+; CHECK: = vrmpyweh({{.*}},{{.*}})
declare i64 @llvm.hexagon.M4.vrmpyeh.s1(i64, i64)
define i64 @M4_vrmpyeh_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M4.vrmpyeh.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrmpyweh({{.*}}, {{.*}}):<<1
+; CHECK: = vrmpyweh({{.*}},{{.*}}):<<1
declare i64 @llvm.hexagon.M4.vrmpyoh.acc.s0(i64, i64, i64)
define i64 @M4_vrmpyoh_acc_s0(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M4.vrmpyoh.acc.s0(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrmpywoh({{.*}}, r5:4)
+; CHECK: += vrmpywoh({{.*}},r5:4)
declare i64 @llvm.hexagon.M4.vrmpyoh.acc.s1(i64, i64, i64)
define i64 @M4_vrmpyoh_acc_s1(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M4.vrmpyoh.acc.s1(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrmpywoh({{.*}}, r5:4):<<1
+; CHECK: += vrmpywoh({{.*}},r5:4):<<1
declare i64 @llvm.hexagon.M4.vrmpyeh.acc.s0(i64, i64, i64)
define i64 @M4_vrmpyeh_acc_s0(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M4.vrmpyeh.acc.s0(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrmpyweh({{.*}}, r5:4)
+; CHECK: += vrmpyweh({{.*}},r5:4)
declare i64 @llvm.hexagon.M4.vrmpyeh.acc.s1(i64, i64, i64)
define i64 @M4_vrmpyeh_acc_s1(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M4.vrmpyeh.acc.s1(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrmpyweh({{.*}}, r5:4):<<1
+; CHECK: += vrmpyweh({{.*}},r5:4):<<1
; Multiply and use upper result
declare i32 @llvm.hexagon.M2.dpmpyss.rnd.s0(i32, i32)
@@ -1187,84 +1187,84 @@ define i32 @M2_dpmpyss_rnd_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.dpmpyss.rnd.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}, {{.*}}):rnd
+; CHECK: = mpy({{.*}},{{.*}}):rnd
declare i32 @llvm.hexagon.M2.mpyu.up(i32, i32)
define i32 @M2_mpyu_up(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.up(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}, {{.*}})
+; CHECK: = mpyu({{.*}},{{.*}})
declare i32 @llvm.hexagon.M2.mpysu.up(i32, i32)
define i32 @M2_mpysu_up(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpysu.up(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpysu({{.*}}, {{.*}})
+; CHECK: = mpysu({{.*}},{{.*}})
declare i32 @llvm.hexagon.M2.hmmpyh.s1(i32, i32)
define i32 @M2_hmmpyh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.hmmpyh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}, {{.*}}.h):<<1:sat
+; CHECK: = mpy({{.*}},{{.*}}.h):<<1:sat
declare i32 @llvm.hexagon.M2.hmmpyl.s1(i32, i32)
define i32 @M2_hmmpyl_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.hmmpyl.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}, {{.*}}.l):<<1:sat
+; CHECK: = mpy({{.*}},{{.*}}.l):<<1:sat
declare i32 @llvm.hexagon.M2.hmmpyh.rs1(i32, i32)
define i32 @M2_hmmpyh_rs1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.hmmpyh.rs1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}, {{.*}}.h):<<1:rnd:sat
+; CHECK: = mpy({{.*}},{{.*}}.h):<<1:rnd:sat
declare i32 @llvm.hexagon.M2.mpy.up.s1.sat(i32, i32)
define i32 @M2_mpy_up_s1_sat(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.up.s1.sat(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}, {{.*}}):<<1:sat
+; CHECK: = mpy({{.*}},{{.*}}):<<1:sat
declare i32 @llvm.hexagon.M2.hmmpyl.rs1(i32, i32)
define i32 @M2_hmmpyl_rs1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.hmmpyl.rs1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}, {{.*}}.l):<<1:rnd:sat
+; CHECK: = mpy({{.*}},{{.*}}.l):<<1:rnd:sat
declare i32 @llvm.hexagon.M2.mpy.up(i32, i32)
define i32 @M2_mpy_up(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.up(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}, {{.*}})
+; CHECK: = mpy({{.*}},{{.*}})
declare i32 @llvm.hexagon.M2.mpy.up.s1(i32, i32)
define i32 @M2_mpy_up_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.up.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}, {{.*}}):<<1
+; CHECK: = mpy({{.*}},{{.*}}):<<1
declare i32 @llvm.hexagon.M4.mac.up.s1.sat(i32, i32, i32)
define i32 @M4_mac_up_s1_sat(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.mac.up.s1.sat(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}, {{.*}}):<<1:sat
+; CHECK: += mpy({{.*}},{{.*}}):<<1:sat
declare i32 @llvm.hexagon.M4.nac.up.s1.sat(i32, i32, i32)
define i32 @M4_nac_up_s1_sat(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.nac.up.s1.sat(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}, {{.*}}):<<1:sat
+; CHECK: -= mpy({{.*}},{{.*}}):<<1:sat
; Multiply and use full result
declare i64 @llvm.hexagon.M2.dpmpyss.s0(i32, i32)
@@ -1272,42 +1272,42 @@ define i64 @M2_dpmpyss_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.dpmpyss.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}, {{.*}})
+; CHECK: = mpy({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.dpmpyuu.s0(i32, i32)
define i64 @M2_dpmpyuu_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.dpmpyuu.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}, {{.*}})
+; CHECK: = mpyu({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.dpmpyss.acc.s0(i64, i32, i32)
define i64 @M2_dpmpyss_acc_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.dpmpyss.acc.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}, {{.*}})
+; CHECK: += mpy({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.dpmpyss.nac.s0(i64, i32, i32)
define i64 @M2_dpmpyss_nac_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.dpmpyss.nac.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}, {{.*}})
+; CHECK: -= mpy({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.dpmpyuu.acc.s0(i64, i32, i32)
define i64 @M2_dpmpyuu_acc_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.dpmpyuu.acc.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}, {{.*}})
+; CHECK: += mpyu({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.dpmpyuu.nac.s0(i64, i32, i32)
define i64 @M2_dpmpyuu_nac_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.dpmpyuu.nac.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}, {{.*}})
+; CHECK: -= mpyu({{.*}},{{.*}})
; Vector dual multiply
declare i64 @llvm.hexagon.M2.vdmpys.s0(i64, i64)
@@ -1315,14 +1315,14 @@ define i64 @M2_vdmpys_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vdmpys.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vdmpy({{.*}}, {{.*}}):sat
+; CHECK: = vdmpy({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.vdmpys.s1(i64, i64)
define i64 @M2_vdmpys_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vdmpys.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vdmpy({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vdmpy({{.*}},{{.*}}):<<1:sat
; Vector reduce multiply bytes
declare i64 @llvm.hexagon.M5.vrmpybuu(i64, i64)
@@ -1330,28 +1330,28 @@ define i64 @M5_vrmpybuu(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M5.vrmpybuu(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrmpybu({{.*}}, {{.*}})
+; CHECK: = vrmpybu({{.*}},{{.*}})
declare i64 @llvm.hexagon.M5.vrmpybsu(i64, i64)
define i64 @M5_vrmpybsu(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M5.vrmpybsu(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrmpybsu({{.*}}, {{.*}})
+; CHECK: = vrmpybsu({{.*}},{{.*}})
declare i64 @llvm.hexagon.M5.vrmacbuu(i64, i64, i64)
define i64 @M5_vrmacbuu(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M5.vrmacbuu(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrmpybu({{.*}}, r5:4)
+; CHECK: += vrmpybu({{.*}},r5:4)
declare i64 @llvm.hexagon.M5.vrmacbsu(i64, i64, i64)
define i64 @M5_vrmacbsu(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M5.vrmacbsu(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrmpybsu({{.*}}, r5:4)
+; CHECK: += vrmpybsu({{.*}},r5:4)
; Vector dual multiply signed by unsigned bytes
declare i64 @llvm.hexagon.M5.vdmpybsu(i64, i64)
@@ -1359,14 +1359,14 @@ define i64 @M5_vdmpybsu(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M5.vdmpybsu(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vdmpybsu({{.*}}, {{.*}}):sat
+; CHECK: = vdmpybsu({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M5.vdmacbsu(i64, i64, i64)
define i64 @M5_vdmacbsu(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M5.vdmacbsu(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vdmpybsu({{.*}}, r5:4):sat
+; CHECK: += vdmpybsu({{.*}},r5:4):sat
; Vector multiply even halfwords
declare i64 @llvm.hexagon.M2.vmpy2es.s0(i64, i64)
@@ -1374,35 +1374,35 @@ define i64 @M2_vmpy2es_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vmpy2es.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyeh({{.*}}, {{.*}}):sat
+; CHECK: = vmpyeh({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.vmpy2es.s1(i64, i64)
define i64 @M2_vmpy2es_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vmpy2es.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyeh({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vmpyeh({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.vmac2es(i64, i64, i64)
define i64 @M2_vmac2es(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2es(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vmpyeh({{.*}}, r5:4)
+; CHECK: += vmpyeh({{.*}},r5:4)
declare i64 @llvm.hexagon.M2.vmac2es.s0(i64, i64, i64)
define i64 @M2_vmac2es_s0(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2es.s0(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vmpyeh({{.*}}, r5:4):sat
+; CHECK: += vmpyeh({{.*}},r5:4):sat
declare i64 @llvm.hexagon.M2.vmac2es.s1(i64, i64, i64)
define i64 @M2_vmac2es_s1(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2es.s1(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vmpyeh({{.*}}, r5:4):<<1:sat
+; CHECK: += vmpyeh({{.*}},r5:4):<<1:sat
; Vector multiply halfwords
declare i64 @llvm.hexagon.M2.vmpy2s.s0(i32, i32)
@@ -1410,35 +1410,35 @@ define i64 @M2_vmpy2s_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.vmpy2s.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vmpyh({{.*}}, {{.*}}):sat
+; CHECK: = vmpyh({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.vmpy2s.s1(i32, i32)
define i64 @M2_vmpy2s_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.vmpy2s.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vmpyh({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vmpyh({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.vmac2(i64, i32, i32)
define i64 @M2_vmac2(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += vmpyh({{.*}}, {{.*}})
+; CHECK: += vmpyh({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.vmac2s.s0(i64, i32, i32)
define i64 @M2_vmac2s_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2s.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += vmpyh({{.*}}, {{.*}}):sat
+; CHECK: += vmpyh({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.vmac2s.s1(i64, i32, i32)
define i64 @M2_vmac2s_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2s.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += vmpyh({{.*}}, {{.*}}):<<1:sat
+; CHECK: += vmpyh({{.*}},{{.*}}):<<1:sat
; Vector multiply halfwords signed by unsigned
declare i64 @llvm.hexagon.M2.vmpy2su.s0(i32, i32)
@@ -1446,28 +1446,28 @@ define i64 @M2_vmpy2su_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.vmpy2su.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vmpyhsu({{.*}}, {{.*}}):sat
+; CHECK: = vmpyhsu({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.vmpy2su.s1(i32, i32)
define i64 @M2_vmpy2su_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.vmpy2su.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vmpyhsu({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vmpyhsu({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.vmac2su.s0(i64, i32, i32)
define i64 @M2_vmac2su_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2su.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += vmpyhsu({{.*}}, {{.*}}):sat
+; CHECK: += vmpyhsu({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.vmac2su.s1(i64, i32, i32)
define i64 @M2_vmac2su_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2su.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += vmpyhsu({{.*}}, {{.*}}):<<1:sat
+; CHECK: += vmpyhsu({{.*}},{{.*}}):<<1:sat
; Vector reduce multiply halfwords
declare i64 @llvm.hexagon.M2.vrmpy.s0(i64, i64)
@@ -1475,14 +1475,14 @@ define i64 @M2_vrmpy_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vrmpy.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrmpyh({{.*}}, {{.*}})
+; CHECK: = vrmpyh({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.vrmac.s0(i64, i64, i64)
define i64 @M2_vrmac_s0(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vrmac.s0(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrmpyh({{.*}}, r5:4)
+; CHECK: += vrmpyh({{.*}},r5:4)
; Vector multiply bytes
declare i64 @llvm.hexagon.M5.vmpybsu(i32, i32)
@@ -1490,28 +1490,28 @@ define i64 @M2_vmpybsu(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M5.vmpybsu(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vmpybsu({{.*}}, {{.*}})
+; CHECK: = vmpybsu({{.*}},{{.*}})
declare i64 @llvm.hexagon.M5.vmpybuu(i32, i32)
define i64 @M2_vmpybuu(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M5.vmpybuu(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vmpybu({{.*}}, {{.*}})
+; CHECK: = vmpybu({{.*}},{{.*}})
declare i64 @llvm.hexagon.M5.vmacbuu(i64, i32, i32)
define i64 @M2_vmacbuu(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M5.vmacbuu(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += vmpybu({{.*}}, {{.*}})
+; CHECK: += vmpybu({{.*}},{{.*}})
declare i64 @llvm.hexagon.M5.vmacbsu(i64, i32, i32)
define i64 @M2_vmacbsu(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M5.vmacbsu(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += vmpybsu({{.*}}, {{.*}})
+; CHECK: += vmpybsu({{.*}},{{.*}})
; Vector polynomial multiply halfwords
declare i64 @llvm.hexagon.M4.vpmpyh(i32, i32)
@@ -1519,11 +1519,11 @@ define i64 @M4_vpmpyh(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M4.vpmpyh(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vpmpyh({{.*}}, {{.*}})
+; CHECK: = vpmpyh({{.*}},{{.*}})
declare i64 @llvm.hexagon.M4.vpmpyh.acc(i64, i32, i32)
define i64 @M4_vpmpyh_acc(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M4.vpmpyh.acc(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: ^= vpmpyh({{.*}}, {{.*}})
+; CHECK: ^= vpmpyh({{.*}},{{.*}})
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll b/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll
index 3e044e3838d..9260790e33a 100644
--- a/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll
@@ -141,28 +141,28 @@ define i64 @S2_shuffeb(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.shuffeb(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = shuffeb({{.*}}, {{.*}})
+; CHECK: = shuffeb({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.shuffob(i64, i64)
define i64 @S2_shuffob(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.shuffob(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = shuffob({{.*}}, {{.*}})
+; CHECK: = shuffob({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.shuffeh(i64, i64)
define i64 @S2_shuffeh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.shuffeh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = shuffeh({{.*}}, {{.*}})
+; CHECK: = shuffeh({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.shuffoh(i64, i64)
define i64 @S2_shuffoh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.shuffoh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = shuffoh({{.*}}, {{.*}})
+; CHECK: = shuffoh({{.*}},{{.*}})
; Vector splat bytes
declare i32 @llvm.hexagon.S2.vsplatrb(i32)
@@ -186,14 +186,14 @@ define i64 @S2_vspliceib(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.vspliceib(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: = vspliceb({{.*}}, {{.*}}, #0)
+; CHECK: = vspliceb({{.*}},{{.*}},#0)
declare i64 @llvm.hexagon.S2.vsplicerb(i64, i64, i32)
define i64 @S2_vsplicerb(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.vsplicerb(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: = vspliceb({{.*}}, {{.*}}, {{.*}})
+; CHECK: = vspliceb({{.*}},{{.*}},{{.*}})
; Vector sign extend
declare i64 @llvm.hexagon.S2.vsxtbh(i32)
@@ -230,14 +230,14 @@ define i64 @S2_vtrunowh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.vtrunowh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vtrunowh({{.*}}, {{.*}})
+; CHECK: = vtrunowh({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.vtrunewh(i64, i64)
define i64 @S2_vtrunewh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.vtrunewh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vtrunewh({{.*}}, {{.*}})
+; CHECK: = vtrunewh({{.*}},{{.*}})
; Vector zero extend
declare i64 @llvm.hexagon.S2.vzxtbh(i32)
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll b/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll
index f06339b9a85..506dc88d3c1 100644
--- a/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll
@@ -10,42 +10,42 @@ define i32 @A4_cmpbgt(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.cmpbgt(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmpb.gt({{.*}}, {{.*}})
+; CHECK: = cmpb.gt({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.cmpbeq(i32, i32)
define i32 @A4_cmpbeq(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.cmpbeq(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmpb.eq({{.*}}, {{.*}})
+; CHECK: = cmpb.eq({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.cmpbgtu(i32, i32)
define i32 @A4_cmpbgtu(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.cmpbgtu(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmpb.gtu({{.*}}, {{.*}})
+; CHECK: = cmpb.gtu({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.cmpbgti(i32, i32)
define i32 @A4_cmpbgti(i32 %a) {
%z = call i32 @llvm.hexagon.A4.cmpbgti(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = cmpb.gt({{.*}}, #0)
+; CHECK: = cmpb.gt({{.*}},#0)
declare i32 @llvm.hexagon.A4.cmpbeqi(i32, i32)
define i32 @A4_cmpbeqi(i32 %a) {
%z = call i32 @llvm.hexagon.A4.cmpbeqi(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = cmpb.eq({{.*}}, #0)
+; CHECK: = cmpb.eq({{.*}},#0)
declare i32 @llvm.hexagon.A4.cmpbgtui(i32, i32)
define i32 @A4_cmpbgtui(i32 %a) {
%z = call i32 @llvm.hexagon.A4.cmpbgtui(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = cmpb.gtu({{.*}}, #0)
+; CHECK: = cmpb.gtu({{.*}},#0)
; Compare half
declare i32 @llvm.hexagon.A4.cmphgt(i32, i32)
@@ -53,42 +53,42 @@ define i32 @A4_cmphgt(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.cmphgt(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmph.gt({{.*}}, {{.*}})
+; CHECK: = cmph.gt({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.cmpheq(i32, i32)
define i32 @A4_cmpheq(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.cmpheq(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmph.eq({{.*}}, {{.*}})
+; CHECK: = cmph.eq({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.cmphgtu(i32, i32)
define i32 @A4_cmphgtu(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.cmphgtu(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmph.gtu({{.*}}, {{.*}})
+; CHECK: = cmph.gtu({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.cmphgti(i32, i32)
define i32 @A4_cmphgti(i32 %a) {
%z = call i32 @llvm.hexagon.A4.cmphgti(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = cmph.gt({{.*}}, #0)
+; CHECK: = cmph.gt({{.*}},#0)
declare i32 @llvm.hexagon.A4.cmpheqi(i32, i32)
define i32 @A4_cmpheqi(i32 %a) {
%z = call i32 @llvm.hexagon.A4.cmpheqi(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = cmph.eq({{.*}}, #0)
+; CHECK: = cmph.eq({{.*}},#0)
declare i32 @llvm.hexagon.A4.cmphgtui(i32, i32)
define i32 @A4_cmphgtui(i32 %a) {
%z = call i32 @llvm.hexagon.A4.cmphgtui(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = cmph.gtu({{.*}}, #0)
+; CHECK: = cmph.gtu({{.*}},#0)
; Compare doublewords
declare i32 @llvm.hexagon.C2.cmpgtp(i64, i64)
@@ -96,21 +96,21 @@ define i32 @C2_cmpgtp(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.C2.cmpgtp(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = cmp.gt({{.*}}, {{.*}})
+; CHECK: = cmp.gt({{.*}},{{.*}})
declare i32 @llvm.hexagon.C2.cmpeqp(i64, i64)
define i32 @C2_cmpeqp(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.C2.cmpeqp(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = cmp.eq({{.*}}, {{.*}})
+; CHECK: = cmp.eq({{.*}},{{.*}})
declare i32 @llvm.hexagon.C2.cmpgtup(i64, i64)
define i32 @C2_cmpgtup(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.C2.cmpgtup(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = cmp.gtu({{.*}}, {{.*}})
+; CHECK: = cmp.gtu({{.*}},{{.*}})
; Compare bitmask
declare i32 @llvm.hexagon.C2.bitsclri(i32, i32)
@@ -118,42 +118,42 @@ define i32 @C2_bitsclri(i32 %a) {
%z = call i32 @llvm.hexagon.C2.bitsclri(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = bitsclr({{.*}}, #0)
+; CHECK: = bitsclr({{.*}},#0)
declare i32 @llvm.hexagon.C4.nbitsclri(i32, i32)
define i32 @C4_nbitsclri(i32 %a) {
%z = call i32 @llvm.hexagon.C4.nbitsclri(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = !bitsclr({{.*}}, #0)
+; CHECK: = !bitsclr({{.*}},#0)
declare i32 @llvm.hexagon.C2.bitsset(i32, i32)
define i32 @C2_bitsset(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.bitsset(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = bitsset({{.*}}, {{.*}})
+; CHECK: = bitsset({{.*}},{{.*}})
declare i32 @llvm.hexagon.C4.nbitsset(i32, i32)
define i32 @C4_nbitsset(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C4.nbitsset(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = !bitsset({{.*}}, {{.*}})
+; CHECK: = !bitsset({{.*}},{{.*}})
declare i32 @llvm.hexagon.C2.bitsclr(i32, i32)
define i32 @C2_bitsclr(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.bitsclr(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = bitsclr({{.*}}, {{.*}})
+; CHECK: = bitsclr({{.*}},{{.*}})
declare i32 @llvm.hexagon.C4.nbitsclr(i32, i32)
define i32 @C4_nbitsclr(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C4.nbitsclr(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = !bitsclr({{.*}}, {{.*}})
+; CHECK: = !bitsclr({{.*}},{{.*}})
; Mask generate from predicate
declare i64 @llvm.hexagon.C2.mask(i32)
@@ -169,7 +169,7 @@ define i32 @A4_tlbmatch(i64 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.tlbmatch(i64 %a, i32 %b)
ret i32 %z
}
-; CHECK: = tlbmatch({{.*}}, {{.*}})
+; CHECK: = tlbmatch({{.*}},{{.*}})
; Test bit
declare i32 @llvm.hexagon.S2.tstbit.i(i32, i32)
@@ -177,28 +177,28 @@ define i32 @S2_tstbit_i(i32 %a) {
%z = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = tstbit({{.*}}, #0)
+; CHECK: = tstbit({{.*}},#0)
declare i32 @llvm.hexagon.S4.ntstbit.i(i32, i32)
define i32 @S4_ntstbit_i(i32 %a) {
%z = call i32 @llvm.hexagon.S4.ntstbit.i(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = !tstbit({{.*}}, #0)
+; CHECK: = !tstbit({{.*}},#0)
declare i32 @llvm.hexagon.S2.tstbit.r(i32, i32)
define i32 @S2_tstbit_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.tstbit.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = tstbit({{.*}}, {{.*}})
+; CHECK: = tstbit({{.*}},{{.*}})
declare i32 @llvm.hexagon.S4.ntstbit.r(i32, i32)
define i32 @S4_ntstbit_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S4.ntstbit.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = !tstbit({{.*}}, {{.*}})
+; CHECK: = !tstbit({{.*}},{{.*}})
; Vector compare halfwords
declare i32 @llvm.hexagon.A2.vcmpheq(i64, i64)
@@ -206,42 +206,42 @@ define i32 @A2_vcmpheq(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A2.vcmpheq(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = vcmph.eq({{.*}}, {{.*}})
+; CHECK: = vcmph.eq({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.vcmphgt(i64, i64)
define i32 @A2_vcmphgt(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A2.vcmphgt(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = vcmph.gt({{.*}}, {{.*}})
+; CHECK: = vcmph.gt({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.vcmphgtu(i64, i64)
define i32 @A2_vcmphgtu(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A2.vcmphgtu(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = vcmph.gtu({{.*}}, {{.*}})
+; CHECK: = vcmph.gtu({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.vcmpheqi(i64, i32)
define i32 @A4_vcmpheqi(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmpheqi(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: = vcmph.eq({{.*}}, #0)
+; CHECK: = vcmph.eq({{.*}},#0)
declare i32 @llvm.hexagon.A4.vcmphgti(i64, i32)
define i32 @A4_vcmphgti(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmphgti(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: = vcmph.gt({{.*}}, #0)
+; CHECK: = vcmph.gt({{.*}},#0)
declare i32 @llvm.hexagon.A4.vcmphgtui(i64, i32)
define i32 @A4_vcmphgtui(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmphgtui(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: = vcmph.gtu({{.*}}, #0)
+; CHECK: = vcmph.gtu({{.*}},#0)
; Vector compare bytes for any match
declare i32 @llvm.hexagon.A4.vcmpbeq.any(i64, i64)
@@ -249,7 +249,7 @@ define i32 @A4_vcmpbeq_any(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A4.vcmpbeq.any(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = any8(vcmpb.eq({{.*}}, {{.*}}))
+; CHECK: = any8(vcmpb.eq({{.*}},{{.*}}))
; Vector compare bytes
declare i32 @llvm.hexagon.A2.vcmpbeq(i64, i64)
@@ -257,42 +257,42 @@ define i32 @A2_vcmpbeq(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A2.vcmpbeq(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = vcmpb.eq({{.*}}, {{.*}})
+; CHECK: = vcmpb.eq({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.vcmpbgtu(i64, i64)
define i32 @A2_vcmpbgtu(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A2.vcmpbgtu(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = vcmpb.gtu({{.*}}, {{.*}})
+; CHECK: = vcmpb.gtu({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.vcmpbgt(i64, i64)
define i32 @A4_vcmpbgt(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A4.vcmpbgt(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = vcmpb.gt({{.*}}, {{.*}})
+; CHECK: = vcmpb.gt({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.vcmpbeqi(i64, i32)
define i32 @A4_vcmpbeqi(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmpbeqi(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: = vcmpb.eq({{.*}}, #0)
+; CHECK: = vcmpb.eq({{.*}},#0)
declare i32 @llvm.hexagon.A4.vcmpbgti(i64, i32)
define i32 @A4_vcmpbgti(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmpbgti(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: = vcmpb.gt({{.*}}, #0)
+; CHECK: = vcmpb.gt({{.*}},#0)
declare i32 @llvm.hexagon.A4.vcmpbgtui(i64, i32)
define i32 @A4_vcmpbgtui(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmpbgtui(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: = vcmpb.gtu({{.*}}, #0)
+; CHECK: = vcmpb.gtu({{.*}},#0)
; Vector compare words
declare i32 @llvm.hexagon.A2.vcmpweq(i64, i64)
@@ -300,42 +300,42 @@ define i32 @A2_vcmpweq(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A2.vcmpweq(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = vcmpw.eq({{.*}}, {{.*}})
+; CHECK: = vcmpw.eq({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.vcmpwgt(i64, i64)
define i32 @A2_vcmpwgt(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A2.vcmpwgt(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = vcmpw.gt({{.*}}, {{.*}})
+; CHECK: = vcmpw.gt({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.vcmpwgtu(i64, i64)
define i32 @A2_vcmpwgtu(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A2.vcmpwgtu(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = vcmpw.gtu({{.*}}, {{.*}})
+; CHECK: = vcmpw.gtu({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.vcmpweqi(i64, i32)
define i32 @A4_vcmpweqi(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmpweqi(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: = vcmpw.eq({{.*}}, #0)
+; CHECK: = vcmpw.eq({{.*}},#0)
declare i32 @llvm.hexagon.A4.vcmpwgti(i64, i32)
define i32 @A4_vcmpwgti(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmpwgti(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: = vcmpw.gt({{.*}}, #0)
+; CHECK: = vcmpw.gt({{.*}},#0)
declare i32 @llvm.hexagon.A4.vcmpwgtui(i64, i32)
define i32 @A4_vcmpwgtui(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmpwgtui(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: = vcmpw.gtu({{.*}}, #0)
+; CHECK: = vcmpw.gtu({{.*}},#0)
; Viterbi pack even and odd predicate bits
declare i32 @llvm.hexagon.C2.vitpack(i32, i32)
@@ -343,7 +343,7 @@ define i32 @C2_vitpack(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.vitpack(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = vitpack({{.*}}, {{.*}})
+; CHECK: = vitpack({{.*}},{{.*}})
; Vector mux
declare i64 @llvm.hexagon.C2.vmux(i32, i64, i64)
@@ -351,4 +351,4 @@ define i64 @C2_vmux(i32 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.C2.vmux(i32 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: = vmux({{.*}}, {{.*}}, {{.*}})
+; CHECK: = vmux({{.*}},{{.*}},{{.*}})
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll b/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll
index 1a65f44c195..8809baf3551 100644
--- a/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll
@@ -10,42 +10,42 @@ define i64 @S2_asr_i_p(i64 %a) {
%z = call i64 @llvm.hexagon.S2.asr.i.p(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: = asr({{.*}}, #0)
+; CHECK: = asr({{.*}},#0)
declare i64 @llvm.hexagon.S2.lsr.i.p(i64, i32)
define i64 @S2_lsr_i_p(i64 %a) {
%z = call i64 @llvm.hexagon.S2.lsr.i.p(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: = lsr({{.*}}, #0)
+; CHECK: = lsr({{.*}},#0)
declare i64 @llvm.hexagon.S2.asl.i.p(i64, i32)
define i64 @S2_asl_i_p(i64 %a) {
%z = call i64 @llvm.hexagon.S2.asl.i.p(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: = asl({{.*}}, #0)
+; CHECK: = asl({{.*}},#0)
declare i32 @llvm.hexagon.S2.asr.i.r(i32, i32)
define i32 @S2_asr_i_r(i32 %a) {
%z = call i32 @llvm.hexagon.S2.asr.i.r(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = asr({{.*}}, #0)
+; CHECK: = asr({{.*}},#0)
declare i32 @llvm.hexagon.S2.lsr.i.r(i32, i32)
define i32 @S2_lsr_i_r(i32 %a) {
%z = call i32 @llvm.hexagon.S2.lsr.i.r(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = lsr({{.*}}, #0)
+; CHECK: = lsr({{.*}},#0)
declare i32 @llvm.hexagon.S2.asl.i.r(i32, i32)
define i32 @S2_asl_i_r(i32 %a) {
%z = call i32 @llvm.hexagon.S2.asl.i.r(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = asl({{.*}}, #0)
+; CHECK: = asl({{.*}},#0)
; Shift by immediate and accumulate
declare i64 @llvm.hexagon.S2.asr.i.p.nac(i64, i64, i32)
@@ -53,84 +53,84 @@ define i64 @S2_asr_i_p_nac(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asr.i.p.nac(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: -= asr({{.*}}, #0)
+; CHECK: -= asr({{.*}},#0)
declare i64 @llvm.hexagon.S2.lsr.i.p.nac(i64, i64, i32)
define i64 @S2_lsr_i_p_nac(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.lsr.i.p.nac(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: -= lsr({{.*}}, #0)
+; CHECK: -= lsr({{.*}},#0)
declare i64 @llvm.hexagon.S2.asl.i.p.nac(i64, i64, i32)
define i64 @S2_asl_i_p_nac(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asl.i.p.nac(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: -= asl({{.*}}, #0)
+; CHECK: -= asl({{.*}},#0)
declare i64 @llvm.hexagon.S2.asr.i.p.acc(i64, i64, i32)
define i64 @S2_asr_i_p_acc(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asr.i.p.acc(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: += asr({{.*}}, #0)
+; CHECK: += asr({{.*}},#0)
declare i64 @llvm.hexagon.S2.lsr.i.p.acc(i64, i64, i32)
define i64 @S2_lsr_i_p_acc(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.lsr.i.p.acc(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: += lsr({{.*}}, #0)
+; CHECK: += lsr({{.*}},#0)
declare i64 @llvm.hexagon.S2.asl.i.p.acc(i64, i64, i32)
define i64 @S2_asl_i_p_acc(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asl.i.p.acc(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: += asl({{.*}}, #0)
+; CHECK: += asl({{.*}},#0)
declare i32 @llvm.hexagon.S2.asr.i.r.nac(i32, i32, i32)
define i32 @S2_asr_i_r_nac(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asr.i.r.nac(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: -= asr({{.*}}, #0)
+; CHECK: -= asr({{.*}},#0)
declare i32 @llvm.hexagon.S2.lsr.i.r.nac(i32, i32, i32)
define i32 @S2_lsr_i_r_nac(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.lsr.i.r.nac(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: -= lsr({{.*}}, #0)
+; CHECK: -= lsr({{.*}},#0)
declare i32 @llvm.hexagon.S2.asl.i.r.nac(i32, i32, i32)
define i32 @S2_asl_i_r_nac(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asl.i.r.nac(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: -= asl({{.*}}, #0)
+; CHECK: -= asl({{.*}},#0)
declare i32 @llvm.hexagon.S2.asr.i.r.acc(i32, i32, i32)
define i32 @S2_asr_i_r_acc(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asr.i.r.acc(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: += asr({{.*}}, #0)
+; CHECK: += asr({{.*}},#0)
declare i32 @llvm.hexagon.S2.lsr.i.r.acc(i32, i32, i32)
define i32 @S2_lsr_i_r_acc(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.lsr.i.r.acc(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: += lsr({{.*}}, #0)
+; CHECK: += lsr({{.*}},#0)
declare i32 @llvm.hexagon.S2.asl.i.r.acc(i32, i32, i32)
define i32 @S2_asl_i_r_acc(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asl.i.r.acc(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: += asl({{.*}}, #0)
+; CHECK: += asl({{.*}},#0)
; Shift by immediate and add
declare i32 @llvm.hexagon.S4.addi.asl.ri(i32, i32, i32)
@@ -138,35 +138,35 @@ define i32 @S4_addi_asl_ri(i32 %a) {
%z = call i32 @llvm.hexagon.S4.addi.asl.ri(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = add(#0, asl({{.*}}, #0))
+; CHECK: = add(#0,asl({{.*}},#0))
declare i32 @llvm.hexagon.S4.subi.asl.ri(i32, i32, i32)
define i32 @S4_subi_asl_ri(i32 %a) {
%z = call i32 @llvm.hexagon.S4.subi.asl.ri(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = sub(#0, asl({{.*}}, #0))
+; CHECK: = sub(#0,asl({{.*}},#0))
declare i32 @llvm.hexagon.S4.addi.lsr.ri(i32, i32, i32)
define i32 @S4_addi_lsr_ri(i32 %a) {
%z = call i32 @llvm.hexagon.S4.addi.lsr.ri(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = add(#0, lsr({{.*}}, #0))
+; CHECK: = add(#0,lsr({{.*}},#0))
declare i32 @llvm.hexagon.S4.subi.lsr.ri(i32, i32, i32)
define i32 @S4_subi_lsr_ri(i32 %a) {
%z = call i32 @llvm.hexagon.S4.subi.lsr.ri(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = sub(#0, lsr({{.*}}, #0))
+; CHECK: = sub(#0,lsr({{.*}},#0))
declare i32 @llvm.hexagon.S2.addasl.rrri(i32, i32, i32)
define i32 @S2_addasl_rrri(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.addasl.rrri(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: = addasl({{.*}}, {{.*}}, #0)
+; CHECK: = addasl({{.*}},{{.*}},#0)
; Shift by immediate and logical
declare i64 @llvm.hexagon.S2.asr.i.p.and(i64, i64, i32)
@@ -174,140 +174,140 @@ define i64 @S2_asr_i_p_and(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asr.i.p.and(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: &= asr({{.*}}, #0)
+; CHECK: &= asr({{.*}},#0)
declare i64 @llvm.hexagon.S2.lsr.i.p.and(i64, i64, i32)
define i64 @S2_lsr_i_p_and(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.lsr.i.p.and(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: {{.*}} &= lsr({{.*}}, #0)
+; CHECK: {{.*}} &= lsr({{.*}},#0)
declare i64 @llvm.hexagon.S2.asl.i.p.and(i64, i64, i32)
define i64 @S2_asl_i_p_and(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asl.i.p.and(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: &= asl({{.*}}, #0)
+; CHECK: &= asl({{.*}},#0)
declare i64 @llvm.hexagon.S2.asr.i.p.or(i64, i64, i32)
define i64 @S2_asr_i_p_or(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asr.i.p.or(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: |= asr({{.*}}, #0)
+; CHECK: |= asr({{.*}},#0)
declare i64 @llvm.hexagon.S2.lsr.i.p.or(i64, i64, i32)
define i64 @S2_lsr_i_p_or(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.lsr.i.p.or(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: |= lsr({{.*}}, #0)
+; CHECK: |= lsr({{.*}},#0)
declare i64 @llvm.hexagon.S2.asl.i.p.or(i64, i64, i32)
define i64 @S2_asl_i_p_or(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asl.i.p.or(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: |= asl({{.*}}, #0)
+; CHECK: |= asl({{.*}},#0)
declare i64 @llvm.hexagon.S2.lsr.i.p.xacc(i64, i64, i32)
define i64 @S2_lsr_i_p_xacc(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.lsr.i.p.xacc(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: ^= lsr({{.*}}, #0)
+; CHECK: ^= lsr({{.*}},#0)
declare i64 @llvm.hexagon.S2.asl.i.p.xacc(i64, i64, i32)
define i64 @S2_asl_i_p_xacc(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asl.i.p.xacc(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: ^= asl({{.*}}, #0)
+; CHECK: ^= asl({{.*}},#0)
declare i32 @llvm.hexagon.S2.asr.i.r.and(i32, i32, i32)
define i32 @S2_asr_i_r_and(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asr.i.r.and(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: &= asr({{.*}}, #0)
+; CHECK: &= asr({{.*}},#0)
declare i32 @llvm.hexagon.S2.lsr.i.r.and(i32, i32, i32)
define i32 @S2_lsr_i_r_and(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.lsr.i.r.and(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: &= lsr({{.*}}, #0)
+; CHECK: &= lsr({{.*}},#0)
declare i32 @llvm.hexagon.S2.asl.i.r.and(i32, i32, i32)
define i32 @S2_asl_i_r_and(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asl.i.r.and(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: &= asl({{.*}}, #0)
+; CHECK: &= asl({{.*}},#0)
declare i32 @llvm.hexagon.S2.asr.i.r.or(i32, i32, i32)
define i32 @S2_asr_i_r_or(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asr.i.r.or(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: |= asr({{.*}}, #0)
+; CHECK: |= asr({{.*}},#0)
declare i32 @llvm.hexagon.S2.lsr.i.r.or(i32, i32, i32)
define i32 @S2_lsr_i_r_or(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.lsr.i.r.or(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: |= lsr({{.*}}, #0)
+; CHECK: |= lsr({{.*}},#0)
declare i32 @llvm.hexagon.S2.asl.i.r.or(i32, i32, i32)
define i32 @S2_asl_i_r_or(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asl.i.r.or(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: |= asl({{.*}}, #0)
+; CHECK: |= asl({{.*}},#0)
declare i32 @llvm.hexagon.S2.lsr.i.r.xacc(i32, i32, i32)
define i32 @S2_lsr_i_r_xacc(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.lsr.i.r.xacc(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: ^= lsr({{.*}}, #0)
+; CHECK: ^= lsr({{.*}},#0)
declare i32 @llvm.hexagon.S2.asl.i.r.xacc(i32, i32, i32)
define i32 @S2_asl_i_r_xacc(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asl.i.r.xacc(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: ^= asl({{.*}}, #0)
+; CHECK: ^= asl({{.*}},#0)
declare i32 @llvm.hexagon.S4.andi.asl.ri(i32, i32, i32)
define i32 @S4_andi_asl_ri(i32 %a) {
%z = call i32 @llvm.hexagon.S4.andi.asl.ri(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = and(#0, asl({{.*}}, #0))
+; CHECK: = and(#0,asl({{.*}},#0))
declare i32 @llvm.hexagon.S4.ori.asl.ri(i32, i32, i32)
define i32 @S4_ori_asl_ri(i32 %a) {
%z = call i32 @llvm.hexagon.S4.ori.asl.ri(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = or(#0, asl({{.*}}, #0))
+; CHECK: = or(#0,asl({{.*}},#0))
declare i32 @llvm.hexagon.S4.andi.lsr.ri(i32, i32, i32)
define i32 @S4_andi_lsr_ri(i32 %a) {
%z = call i32 @llvm.hexagon.S4.andi.lsr.ri(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = and(#0, lsr({{.*}}, #0))
+; CHECK: = and(#0,lsr({{.*}},#0))
declare i32 @llvm.hexagon.S4.ori.lsr.ri(i32, i32, i32)
define i32 @S4_ori_lsr_ri(i32 %a) {
%z = call i32 @llvm.hexagon.S4.ori.lsr.ri(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = or(#0, lsr({{.*}}, #0))
+; CHECK: = or(#0,lsr({{.*}},#0))
; Shift right by immediate with rounding
declare i64 @llvm.hexagon.S2.asr.i.p.rnd(i64, i32)
@@ -315,14 +315,14 @@ define i64 @S2_asr_i_p_rnd(i64 %a) {
%z = call i64 @llvm.hexagon.S2.asr.i.p.rnd(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: = asr({{.*}}, #0):rnd
+; CHECK: = asr({{.*}},#0):rnd
declare i32 @llvm.hexagon.S2.asr.i.r.rnd(i32, i32)
define i32 @S2_asr_i_r_rnd(i32 %a) {
%z = call i32 @llvm.hexagon.S2.asr.i.r.rnd(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = asr({{.*}}, #0):rnd
+; CHECK: = asr({{.*}},#0):rnd
; Shift left by immediate with saturation
declare i32 @llvm.hexagon.S2.asl.i.r.sat(i32, i32)
@@ -330,7 +330,7 @@ define i32 @S2_asl_i_r_sat(i32 %a) {
%z = call i32 @llvm.hexagon.S2.asl.i.r.sat(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = asl({{.*}}, #0):sat
+; CHECK: = asl({{.*}},#0):sat
; Shift by register
declare i64 @llvm.hexagon.S2.asr.r.p(i64, i32)
@@ -338,63 +338,63 @@ define i64 @S2_asr_r_p(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.asr.r.p(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: = asr({{.*}}, {{.*}})
+; CHECK: = asr({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.lsr.r.p(i64, i32)
define i64 @S2_lsr_r_p(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.lsr.r.p(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: = lsr({{.*}}, {{.*}})
+; CHECK: = lsr({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.asl.r.p(i64, i32)
define i64 @S2_asl_r_p(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.asl.r.p(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: = asl({{.*}}, {{.*}})
+; CHECK: = asl({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.lsl.r.p(i64, i32)
define i64 @S2_lsl_r_p(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.lsl.r.p(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: = lsl({{.*}}, {{.*}})
+; CHECK: = lsl({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.asr.r.r(i32, i32)
define i32 @S2_asr_r_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asr.r.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = asr({{.*}}, {{.*}})
+; CHECK: = asr({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.lsr.r.r(i32, i32)
define i32 @S2_lsr_r_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.lsr.r.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = lsr({{.*}}, {{.*}})
+; CHECK: = lsr({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.asl.r.r(i32, i32)
define i32 @S2_asl_r_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asl.r.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = asl({{.*}}, {{.*}})
+; CHECK: = asl({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.lsl.r.r(i32, i32)
define i32 @S2_lsl_r_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.lsl.r.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = lsl({{.*}}, {{.*}})
+; CHECK: = lsl({{.*}},{{.*}})
declare i32 @llvm.hexagon.S4.lsli(i32, i32)
define i32 @S4_lsli(i32 %a) {
%z = call i32 @llvm.hexagon.S4.lsli(i32 0, i32 %a)
ret i32 %z
}
-; CHECK: = lsl(#0, {{.*}})
+; CHECK: = lsl(#0,{{.*}})
; Shift by register and accumulate
declare i64 @llvm.hexagon.S2.asr.r.p.nac(i64, i64, i32)
@@ -402,112 +402,112 @@ define i64 @S2_asr_r_p_nac(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.asr.r.p.nac(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= asr({{.*}}, r4)
+; CHECK: -= asr({{.*}},r4)
declare i64 @llvm.hexagon.S2.lsr.r.p.nac(i64, i64, i32)
define i64 @S2_lsr_r_p_nac(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.lsr.r.p.nac(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= lsr({{.*}}, r4)
+; CHECK: -= lsr({{.*}},r4)
declare i64 @llvm.hexagon.S2.asl.r.p.nac(i64, i64, i32)
define i64 @S2_asl_r_p_nac(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.asl.r.p.nac(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= asl({{.*}}, r4)
+; CHECK: -= asl({{.*}},r4)
declare i64 @llvm.hexagon.S2.lsl.r.p.nac(i64, i64, i32)
define i64 @S2_lsl_r_p_nac(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.lsl.r.p.nac(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= lsl({{.*}}, r4)
+; CHECK: -= lsl({{.*}},r4)
declare i64 @llvm.hexagon.S2.asr.r.p.acc(i64, i64, i32)
define i64 @S2_asr_r_p_acc(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.asr.r.p.acc(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: += asr({{.*}}, r4)
+; CHECK: += asr({{.*}},r4)
declare i64 @llvm.hexagon.S2.lsr.r.p.acc(i64, i64, i32)
define i64 @S2_lsr_r_p_acc(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.lsr.r.p.acc(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: += lsr({{.*}}, r4)
+; CHECK: += lsr({{.*}},r4)
declare i64 @llvm.hexagon.S2.asl.r.p.acc(i64, i64, i32)
define i64 @S2_asl_r_p_acc(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.asl.r.p.acc(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: += asl({{.*}}, r4)
+; CHECK: += asl({{.*}},r4)
declare i64 @llvm.hexagon.S2.lsl.r.p.acc(i64, i64, i32)
define i64 @S2_lsl_r_p_acc(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.lsl.r.p.acc(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: += lsl({{.*}}, r4)
+; CHECK: += lsl({{.*}},r4)
declare i32 @llvm.hexagon.S2.asr.r.r.nac(i32, i32, i32)
define i32 @S2_asr_r_r_nac(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.asr.r.r.nac(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= asr({{.*}}, {{.*}})
+; CHECK: -= asr({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.lsr.r.r.nac(i32, i32, i32)
define i32 @S2_lsr_r_r_nac(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.lsr.r.r.nac(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= lsr({{.*}}, {{.*}})
+; CHECK: -= lsr({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.asl.r.r.nac(i32, i32, i32)
define i32 @S2_asl_r_r_nac(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.asl.r.r.nac(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= asl({{.*}}, {{.*}})
+; CHECK: -= asl({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.lsl.r.r.nac(i32, i32, i32)
define i32 @S2_lsl_r_r_nac(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.lsl.r.r.nac(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= lsl({{.*}}, {{.*}})
+; CHECK: -= lsl({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.asr.r.r.acc(i32, i32, i32)
define i32 @S2_asr_r_r_acc(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.asr.r.r.acc(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += asr({{.*}}, {{.*}})
+; CHECK: += asr({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.lsr.r.r.acc(i32, i32, i32)
define i32 @S2_lsr_r_r_acc(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.lsr.r.r.acc(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += lsr({{.*}}, {{.*}})
+; CHECK: += lsr({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.asl.r.r.acc(i32, i32, i32)
define i32 @S2_asl_r_r_acc(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.asl.r.r.acc(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += asl({{.*}}, {{.*}})
+; CHECK: += asl({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.lsl.r.r.acc(i32, i32, i32)
define i32 @S2_lsl_r_r_acc(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.lsl.r.r.acc(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += lsl({{.*}}, {{.*}})
+; CHECK: += lsl({{.*}},{{.*}})
; Shift by register and logical
declare i64 @llvm.hexagon.S2.asr.r.p.or(i64, i64, i32)
@@ -515,112 +515,112 @@ define i64 @S2_asr_r_p_or(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.asr.r.p.or(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: |= asr({{.*}}, r4)
+; CHECK: |= asr({{.*}},r4)
declare i64 @llvm.hexagon.S2.lsr.r.p.or(i64, i64, i32)
define i64 @S2_lsr_r_p_or(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.lsr.r.p.or(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: |= lsr({{.*}}, r4)
+; CHECK: |= lsr({{.*}},r4)
declare i64 @llvm.hexagon.S2.asl.r.p.or(i64, i64, i32)
define i64 @S2_asl_r_p_or(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.asl.r.p.or(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: |= asl({{.*}}, r4)
+; CHECK: |= asl({{.*}},r4)
declare i64 @llvm.hexagon.S2.lsl.r.p.or(i64, i64, i32)
define i64 @S2_lsl_r_p_or(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.lsl.r.p.or(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: |= lsl({{.*}}, r4)
+; CHECK: |= lsl({{.*}},r4)
declare i64 @llvm.hexagon.S2.asr.r.p.and(i64, i64, i32)
define i64 @S2_asr_r_p_and(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.asr.r.p.and(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: &= asr({{.*}}, r4)
+; CHECK: &= asr({{.*}},r4)
declare i64 @llvm.hexagon.S2.lsr.r.p.and(i64, i64, i32)
define i64 @S2_lsr_r_p_and(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.lsr.r.p.and(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: &= lsr({{.*}}, r4)
+; CHECK: &= lsr({{.*}},r4)
declare i64 @llvm.hexagon.S2.asl.r.p.and(i64, i64, i32)
define i64 @S2_asl_r_p_and(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.asl.r.p.and(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: &= asl({{.*}}, r4)
+; CHECK: &= asl({{.*}},r4)
declare i64 @llvm.hexagon.S2.lsl.r.p.and(i64, i64, i32)
define i64 @S2_lsl_r_p_and(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.lsl.r.p.and(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: &= lsl({{.*}}, r4)
+; CHECK: &= lsl({{.*}},r4)
declare i32 @llvm.hexagon.S2.asr.r.r.or(i32, i32, i32)
define i32 @S2_asr_r_r_or(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.asr.r.r.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: |= asr({{.*}}, {{.*}})
+; CHECK: |= asr({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.lsr.r.r.or(i32, i32, i32)
define i32 @S2_lsr_r_r_or(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.lsr.r.r.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: |= lsr({{.*}}, {{.*}})
+; CHECK: |= lsr({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.asl.r.r.or(i32, i32, i32)
define i32 @S2_asl_r_r_or(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.asl.r.r.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: |= asl({{.*}}, {{.*}})
+; CHECK: |= asl({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.lsl.r.r.or(i32, i32, i32)
define i32 @S2_lsl_r_r_or(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.lsl.r.r.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: |= lsl({{.*}}, {{.*}})
+; CHECK: |= lsl({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.asr.r.r.and(i32, i32, i32)
define i32 @S2_asr_r_r_and(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.asr.r.r.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: &= asr({{.*}}, {{.*}})
+; CHECK: &= asr({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.lsr.r.r.and(i32, i32, i32)
define i32 @S2_lsr_r_r_and(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.lsr.r.r.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: &= lsr({{.*}}, {{.*}})
+; CHECK: &= lsr({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.asl.r.r.and(i32, i32, i32)
define i32 @S2_asl_r_r_and(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.asl.r.r.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: &= asl({{.*}}, {{.*}})
+; CHECK: &= asl({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.lsl.r.r.and(i32, i32, i32)
define i32 @S2_lsl_r_r_and(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.lsl.r.r.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: &= lsl({{.*}}, {{.*}})
+; CHECK: &= lsl({{.*}},{{.*}})
; Shift by register with saturation
declare i32 @llvm.hexagon.S2.asr.r.r.sat(i32, i32)
@@ -628,14 +628,14 @@ define i32 @S2_asr_r_r_sat(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = asr({{.*}}, {{.*}}):sat
+; CHECK: = asr({{.*}},{{.*}}):sat
declare i32 @llvm.hexagon.S2.asl.r.r.sat(i32, i32)
define i32 @S2_asl_r_r_sat(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asl.r.r.sat(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = asl({{.*}}, {{.*}}):sat
+; CHECK: = asl({{.*}},{{.*}}):sat
; Vector shift halfwords by immediate
declare i64 @llvm.hexagon.S2.asr.i.vh(i64, i32)
@@ -643,21 +643,21 @@ define i64 @S2_asr_i_vh(i64 %a) {
%z = call i64 @llvm.hexagon.S2.asr.i.vh(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: = vasrh({{.*}}, #0)
+; CHECK: = vasrh({{.*}},#0)
declare i64 @llvm.hexagon.S2.lsr.i.vh(i64, i32)
define i64 @S2_lsr_i_vh(i64 %a) {
%z = call i64 @llvm.hexagon.S2.lsr.i.vh(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: = vlsrh({{.*}}, #0)
+; CHECK: = vlsrh({{.*}},#0)
declare i64 @llvm.hexagon.S2.asl.i.vh(i64, i32)
define i64 @S2_asl_i_vh(i64 %a) {
%z = call i64 @llvm.hexagon.S2.asl.i.vh(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: = vaslh({{.*}}, #0)
+; CHECK: = vaslh({{.*}},#0)
; Vector shift halfwords by register
declare i64 @llvm.hexagon.S2.asr.r.vh(i64, i32)
@@ -665,28 +665,28 @@ define i64 @S2_asr_r_vh(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.asr.r.vh(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vasrh({{.*}}, {{.*}})
+; CHECK: = vasrh({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.lsr.r.vh(i64, i32)
define i64 @S2_lsr_r_vh(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.lsr.r.vh(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vlsrh({{.*}}, {{.*}})
+; CHECK: = vlsrh({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.asl.r.vh(i64, i32)
define i64 @S2_asl_r_vh(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.asl.r.vh(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vaslh({{.*}}, {{.*}})
+; CHECK: = vaslh({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.lsl.r.vh(i64, i32)
define i64 @S2_lsl_r_vh(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.lsl.r.vh(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vlslh({{.*}}, {{.*}})
+; CHECK: = vlslh({{.*}},{{.*}})
; Vector shift words by immediate
declare i64 @llvm.hexagon.S2.asr.i.vw(i64, i32)
@@ -694,21 +694,21 @@ define i64 @S2_asr_i_vw(i64 %a) {
%z = call i64 @llvm.hexagon.S2.asr.i.vw(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: = vasrw({{.*}}, #0)
+; CHECK: = vasrw({{.*}},#0)
declare i64 @llvm.hexagon.S2.lsr.i.vw(i64, i32)
define i64 @S2_lsr_i_vw(i64 %a) {
%z = call i64 @llvm.hexagon.S2.lsr.i.vw(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: = vlsrw({{.*}}, #0)
+; CHECK: = vlsrw({{.*}},#0)
declare i64 @llvm.hexagon.S2.asl.i.vw(i64, i32)
define i64 @S2_asl_i_vw(i64 %a) {
%z = call i64 @llvm.hexagon.S2.asl.i.vw(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: = vaslw({{.*}}, #0)
+; CHECK: = vaslw({{.*}},#0)
; Vector shift words with truncate and pack
declare i32 @llvm.hexagon.S2.asr.i.svw.trun(i64, i32)
@@ -716,11 +716,11 @@ define i32 @S2_asr_i_svw_trun(i64 %a) {
%z = call i32 @llvm.hexagon.S2.asr.i.svw.trun(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: = vasrw({{.*}}, #0)
+; CHECK: = vasrw({{.*}},#0)
declare i32 @llvm.hexagon.S2.asr.r.svw.trun(i64, i32)
define i32 @S2_asr_r_svw_trun(i64 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asr.r.svw.trun(i64 %a, i32 %b)
ret i32 %z
}
-; CHECK: = vasrw({{.*}}, {{.*}})
+; CHECK: = vasrw({{.*}},{{.*}})
diff --git a/test/CodeGen/Hexagon/newvalueSameReg.ll b/test/CodeGen/Hexagon/newvalueSameReg.ll
index 0fc4df22eb3..39f32fb2f9d 100644
--- a/test/CodeGen/Hexagon/newvalueSameReg.ll
+++ b/test/CodeGen/Hexagon/newvalueSameReg.ll
@@ -12,8 +12,8 @@
; Test that we don't generate a new value compare if the operands are
; the same register.
-; CHECK-NOT: cmp.eq([[REG0:(r[0-9]+)]].new, [[REG0]])
-; CHECK: cmp.eq([[REG1:(r[0-9]+)]], [[REG1]])
+; CHECK-NOT: cmp.eq([[REG0:(r[0-9]+)]].new,[[REG0]])
+; CHECK: cmp.eq([[REG1:(r[0-9]+)]],[[REG1]])
; Function Attrs: nounwind
declare void @fprintf(%struct._Dnk_filet.1* nocapture, i8* nocapture readonly, ...) #1
diff --git a/test/CodeGen/Hexagon/newvaluejump.ll b/test/CodeGen/Hexagon/newvaluejump.ll
index 3e1ee179573..e1437f369c8 100644
--- a/test/CodeGen/Hexagon/newvaluejump.ll
+++ b/test/CodeGen/Hexagon/newvaluejump.ll
@@ -6,7 +6,7 @@
define i32 @foo(i32 %a) nounwind {
entry:
-; CHECK: if (cmp.eq(r{{[0-9]+}}.new, #0)) jump{{.}}
+; CHECK: if (cmp.eq(r{{[0-9]+}}.new,#0)) jump{{.}}
%addr1 = alloca i32, align 4
%addr2 = alloca i32, align 4
%0 = load i32, i32* @i, align 4
diff --git a/test/CodeGen/Hexagon/newvaluejump2.ll b/test/CodeGen/Hexagon/newvaluejump2.ll
index a812a7d9665..4c897f0830f 100644
--- a/test/CodeGen/Hexagon/newvaluejump2.ll
+++ b/test/CodeGen/Hexagon/newvaluejump2.ll
@@ -6,7 +6,7 @@
@Reg = common global i32 0, align 4
define i32 @main() nounwind {
entry:
-; CHECK: if (cmp.gt(r{{[0-9]+}}, r{{[0-9]+}}.new)) jump:{{[t|nt]}} .LBB{{[0-9]+}}_{{[0-9]+}}
+; CHECK: if (cmp.gt(r{{[0-9]+}},r{{[0-9]+}}.new)) jump:{{[t|nt]}} .LBB{{[0-9]+}}_{{[0-9]+}}
%Reg2 = alloca i32, align 4
%0 = load i32, i32* %Reg2, align 4
%1 = load i32, i32* @Reg, align 4
diff --git a/test/CodeGen/Hexagon/opt-addr-mode.ll b/test/CodeGen/Hexagon/opt-addr-mode.ll
index 7cb437c327c..705cd045ea3 100644
--- a/test/CodeGen/Hexagon/opt-addr-mode.ll
+++ b/test/CodeGen/Hexagon/opt-addr-mode.ll
@@ -2,10 +2,10 @@
; RUN: llc -march=hexagon -hexagon-small-data-threshold=0 -disable-hexagon-amodeopt=0 -hexagon-amode-growth-limit=4 < %s | FileCheck %s --check-prefix=CHECK-AMODE
; CHECK-NO-AMODE: [[REG0:(r[0-9]+)]] = ##global_2
-; CHECK-NO-AMODE: memw([[REG0]] + {{.*}}<<#2) =
+; CHECK-NO-AMODE: memw([[REG0]]+{{.*}}<<#2) =
; CHECK-AMODE: [[REG1:(r[0-9]+)]] = memw(##global_1)
-; CHECK-AMODE: memw([[REG1]]<<#2 + ##global_2) =
+; CHECK-AMODE: memw([[REG1]]<<#2+##global_2) =
@global_1 = external global i32, align 4
@global_2 = external global [128 x i32], align 8
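The addressing-mode optimization toggled by these flags folds the global's address directly into the store. A reduced sketch (assumed function name) of IR that exercises the memw(Rt<<#2+##global_2) form:

  define void @store_idx(i32 %i, i32 %v) {
    %p = getelementptr [128 x i32], [128 x i32]* @global_2, i32 0, i32 %i
    store i32 %v, i32* %p, align 4
    ret void
  }

With -disable-hexagon-amodeopt=0 the shifted index and the ##global_2 extender can share a single store, as the CHECK-AMODE line expects.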
diff --git a/test/CodeGen/Hexagon/opt-fabs.ll b/test/CodeGen/Hexagon/opt-fabs.ll
index 2ecbce310ad..9c94f853ba5 100644
--- a/test/CodeGen/Hexagon/opt-fabs.ll
+++ b/test/CodeGen/Hexagon/opt-fabs.ll
@@ -1,7 +1,7 @@
; RUN: llc -mtriple=hexagon-unknown-elf -mcpu=hexagonv5 -hexagon-bit=0 < %s | FileCheck %s
; Optimize fabsf to clrbit in V5.
-; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #31)
+; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#31)
define float @my_fabsf(float %x) nounwind {
entry:
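For a 32-bit float, fabs just clears the IEEE-754 sign bit (bit 31), which is why V5 can lower it to a single clrbit. A minimal sketch, assuming the llvm.fabs intrinsic is called directly:

  declare float @llvm.fabs.f32(float)
  define float @f(float %x) {
    %a = call float @llvm.fabs.f32(float %x)
    ret float %a
  }
  ; expected: r0 = clrbit(r0,#31)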
diff --git a/test/CodeGen/Hexagon/opt-fneg.ll b/test/CodeGen/Hexagon/opt-fneg.ll
index 97895786586..da496c58801 100644
--- a/test/CodeGen/Hexagon/opt-fneg.ll
+++ b/test/CodeGen/Hexagon/opt-fneg.ll
@@ -3,7 +3,7 @@
define float @foo(float %x) nounwind {
entry:
-; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #31)
+; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}},#31)
%x.addr = alloca float, align 4
store float %x, float* %x.addr, align 4
%0 = load float, float* %x.addr, align 4
@@ -13,14 +13,14 @@ entry:
define float @bar(float %x) nounwind {
entry:
-; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #31)
+; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}},#31)
%sub = fsub float -0.000000e+00, %x
ret float %sub
}
define float @baz(float %x) nounwind {
entry:
-; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #31)
+; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}},#31)
%conv1 = fmul float %x, -1.000000e+00
ret float %conv1
}
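Negation is likewise a pure sign-bit operation: fsub -0.000000e+00, %x is the canonical fneg, and the fmul by -1.0 is folded to the same thing, so no floating-point arithmetic unit is needed:

  ; for binary32: -x == x ^ 0x80000000, i.e. togglebit(r,#31)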
diff --git a/test/CodeGen/Hexagon/opt-spill-volatile.ll b/test/CodeGen/Hexagon/opt-spill-volatile.ll
index 99dd4646d74..4f50a9a28a3 100644
--- a/test/CodeGen/Hexagon/opt-spill-volatile.ll
+++ b/test/CodeGen/Hexagon/opt-spill-volatile.ll
@@ -6,7 +6,7 @@ target triple = "hexagon"
; CHECK-LABEL: foo
; CHECK: memw(r29+#4) =
-; CHECK: = memw(r29 + #4)
+; CHECK: = memw(r29+#4)
define i32 @foo(i32 %a) #0 {
entry:
%x = alloca i32, align 4
diff --git a/test/CodeGen/Hexagon/pic-local.ll b/test/CodeGen/Hexagon/pic-local.ll
index 48b0096aa65..6544b3d3216 100644
--- a/test/CodeGen/Hexagon/pic-local.ll
+++ b/test/CodeGen/Hexagon/pic-local.ll
@@ -9,11 +9,11 @@ define internal void @f2() {
}
define void()* @get_f1() {
- ; CHECK: r0 = add(pc, ##.Lf1@PCREL)
+ ; CHECK: r0 = add(pc,##.Lf1@PCREL)
ret void()* @f1
}
define void()* @get_f2() {
- ; CHECK: r0 = add(pc, ##f2@PCREL)
+ ; CHECK: r0 = add(pc,##f2@PCREL)
ret void()* @f2
}
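The difference between the two PCREL operands falls out of linkage: f2 is internal (per the hunk header) and keeps its symbol name, while f1 is presumably private and is emitted only as an assembler-local label, e.g.:

  define private void @f1() {   ; assumed; a private symbol becomes .Lf1
    ret void
  }

hence add(pc,##.Lf1@PCREL) for f1 versus add(pc,##f2@PCREL) for f2.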
diff --git a/test/CodeGen/Hexagon/pic-simple.ll b/test/CodeGen/Hexagon/pic-simple.ll
index 46d95204f2e..aeb21ef7de1 100644
--- a/test/CodeGen/Hexagon/pic-simple.ll
+++ b/test/CodeGen/Hexagon/pic-simple.ll
@@ -1,8 +1,8 @@
; RUN: llc -mtriple=hexagon-- -mcpu=hexagonv5 -relocation-model=pic < %s | FileCheck %s
-; CHECK: r{{[0-9]+}} = add({{pc|PC}}, ##_GLOBAL_OFFSET_TABLE_@PCREL)
-; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}{{.*}}+{{.*}}##src@GOT)
-; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}{{.*}}+{{.*}}##dst@GOT)
+; CHECK: r{{[0-9]+}} = add({{pc|PC}},##_GLOBAL_OFFSET_TABLE_@PCREL)
+; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}+##src@GOT)
+; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}+##dst@GOT)
@dst = external global i32
@src = external global i32
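Under the PIC relocation model every external access goes through the GOT: the GOT base is formed PC-relatively once, then each global is reached via its @GOT slot. A reduced sketch (assumed function name) of code producing the three CHECK lines:

  define void @copy() {
    %v = load i32, i32* @src
    store i32 %v, i32* @dst
    ret void
  }

which lowers roughly to add(pc,##_GLOBAL_OFFSET_TABLE_@PCREL) followed by memw(rN+##src@GOT) and memw(rN+##dst@GOT) loads of the two addresses.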
diff --git a/test/CodeGen/Hexagon/pic-static.ll b/test/CodeGen/Hexagon/pic-static.ll
index 66d7734f2cf..95da5f060d7 100644
--- a/test/CodeGen/Hexagon/pic-static.ll
+++ b/test/CodeGen/Hexagon/pic-static.ll
@@ -1,8 +1,8 @@
; RUN: llc -mtriple=hexagon-- -mcpu=hexagonv5 -relocation-model=pic < %s | FileCheck %s
-; CHECK-DAG: r{{[0-9]+}} = add({{pc|PC}}, ##_GLOBAL_OFFSET_TABLE_@PCREL)
-; CHECK-DAG: r{{[0-9]+}} = add({{pc|PC}}, ##x@PCREL)
-; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}{{.*}}+{{.*}}##bar@GOT)
+; CHECK-DAG: r{{[0-9]+}} = add({{pc|PC}},##_GLOBAL_OFFSET_TABLE_@PCREL)
+; CHECK-DAG: r{{[0-9]+}} = add({{pc|PC}},##x@PCREL)
+; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}+##bar@GOT)
@x = internal global i32 9, align 4
@bar = external global i32*
diff --git a/test/CodeGen/Hexagon/predicate-logical.ll b/test/CodeGen/Hexagon/predicate-logical.ll
index be2bcb03d6a..e3ba4d8643d 100644
--- a/test/CodeGen/Hexagon/predicate-logical.ll
+++ b/test/CodeGen/Hexagon/predicate-logical.ll
@@ -1,5 +1,5 @@
; RUN: llc -O2 -march=hexagon < %s | FileCheck %s
-; CHECK: p{{[0-9]}} = or(p{{[0-9]}}, and(p{{[0-9]}}, p{{[0-9]}}))
+; CHECK: p{{[0-9]}} = or(p{{[0-9]}},and(p{{[0-9]}},p{{[0-9]}}))
target triple = "hexagon"
diff --git a/test/CodeGen/Hexagon/predicate-rcmp.ll b/test/CodeGen/Hexagon/predicate-rcmp.ll
index 45daa88d716..78991e0dbe7 100644
--- a/test/CodeGen/Hexagon/predicate-rcmp.ll
+++ b/test/CodeGen/Hexagon/predicate-rcmp.ll
@@ -1,5 +1,5 @@
; RUN: llc -O2 -march=hexagon < %s | FileCheck %s
-; CHECK: cmp.eq(r{{[0-9]+}}, #0)
+; CHECK: cmp.eq(r{{[0-9]+}},#0)
; Check that the result of the builtin is not stored directly, i.e. that
; there is an instruction that converts it to {0,1} from {0,-1}. Right now
; the instruction is "r4 = !cmp.eq(r0, #0)".
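The conversion works because mapping {0,-1} to {0,1} is exactly a "not equal to zero" test. Worked through both inputs:

  r0 = 0   -> cmp.eq(r0,#0) true  -> negated -> result 0
  r0 = -1  -> cmp.eq(r0,#0) false -> negated -> result 1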
diff --git a/test/CodeGen/Hexagon/ret-struct-by-val.ll b/test/CodeGen/Hexagon/ret-struct-by-val.ll
index 26ed2ff36f7..60a97bcccfc 100644
--- a/test/CodeGen/Hexagon/ret-struct-by-val.ll
+++ b/test/CodeGen/Hexagon/ret-struct-by-val.ll
@@ -1,5 +1,5 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
-; CHECK: r0 = add(r0, r1)
+; CHECK: r0 = add(r0,r1)
; Allow simple structures to be returned by value.
diff --git a/test/CodeGen/Hexagon/signed_immediates.ll b/test/CodeGen/Hexagon/signed_immediates.ll
index a4766313cc6..ad4aa259660 100644
--- a/test/CodeGen/Hexagon/signed_immediates.ll
+++ b/test/CodeGen/Hexagon/signed_immediates.ll
@@ -33,7 +33,7 @@ define i64* @foo4(i64* %a, i64 %b) {
}
; s6Ext
-; CHECK: if (p0.new) memw(r0+#0)=#-1
+; CHECK: if (p0.new) memw(r0+#0) = #-1
define void @foo5(i32* %a, i1 %b) {
br i1 %b, label %x, label %y
x:
@@ -44,7 +44,7 @@ y:
}
; s10Ext
-; CHECK: p0 = cmp.eq(r0, #-1)
+; CHECK: p0 = cmp.eq(r0,#-1)
define i1 @foo7(i32 %a) {
%b = icmp eq i32 %a, -1
ret i1 %b
@@ -96,4 +96,4 @@ y:
; CHECK: r0 = #-2
define i32 @foo13() {
ret i32 -2
-}
\ No newline at end of file
+}
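The sNExt names refer to the width of the signed immediate field before an extender is required: s10, for instance, covers [-512, 511], so the #-1 in cmp.eq(r0,#-1) encodes directly with no ## prefix. A minimal sketch of the s10 case:

  define i1 @fits_s10(i32 %a) {
    %b = icmp eq i32 %a, -1    ; #-1 is inside [-512,511], no extender needed
    ret i1 %b
  }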
diff --git a/test/CodeGen/Hexagon/stack-align1.ll b/test/CodeGen/Hexagon/stack-align1.ll
index 4efa70f5985..aefd16594f0 100644
--- a/test/CodeGen/Hexagon/stack-align1.ll
+++ b/test/CodeGen/Hexagon/stack-align1.ll
@@ -1,7 +1,7 @@
; RUN: llc -O0 -march=hexagon < %s | FileCheck %s
-; CHECK: and(r29, #-32)
-; CHECK-DAG: add(r29, #0)
-; CHECK-DAG: add(r29, #28)
+; CHECK: and(r29,#-32)
+; CHECK-DAG: add(r29,#0)
+; CHECK-DAG: add(r29,#28)
target triple = "hexagon-unknown-unknown"
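and(r29,#-32) masks the low five bits off the stack pointer, realigning it down to a 32-byte boundary; the add(r29,#N) lines then address objects in the realigned frame. A minimal sketch of IR that forces this (assumed names):

  define void @f() {
    %x = alloca i32, align 32   ; over-aligned past the default stack alignment
    call void @use(i32* %x)
    ret void
  }
  declare void @use(i32*)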
diff --git a/test/CodeGen/Hexagon/stack-align2.ll b/test/CodeGen/Hexagon/stack-align2.ll
index 1bbd5782032..042e4097c56 100644
--- a/test/CodeGen/Hexagon/stack-align2.ll
+++ b/test/CodeGen/Hexagon/stack-align2.ll
@@ -1,9 +1,9 @@
; RUN: llc -O0 -march=hexagon < %s | FileCheck %s
-; CHECK: and(r29, #-128)
-; CHECK-DAG: add(r29, #0)
-; CHECK-DAG: add(r29, #64)
-; CHECK-DAG: add(r29, #96)
-; CHECK-DAG: add(r29, #124)
+; CHECK: and(r29,#-128)
+; CHECK-DAG: add(r29,#0)
+; CHECK-DAG: add(r29,#64)
+; CHECK-DAG: add(r29,#96)
+; CHECK-DAG: add(r29,#124)
target triple = "hexagon-unknown-unknown"
diff --git a/test/CodeGen/Hexagon/stack-alloca1.ll b/test/CodeGen/Hexagon/stack-alloca1.ll
index 00e9e051aeb..b38b8846d26 100644
--- a/test/CodeGen/Hexagon/stack-alloca1.ll
+++ b/test/CodeGen/Hexagon/stack-alloca1.ll
@@ -1,5 +1,5 @@
; RUN: llc -O0 -march=hexagon < %s | FileCheck %s
-; CHECK: sub(r29, r[[REG:[0-9]+]])
+; CHECK: sub(r29,r[[REG:[0-9]+]])
; CHECK: r29 = r[[REG]]
target triple = "hexagon-unknown-unknown"
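A variable-sized alloca cannot be folded into the fixed frame, so its size is subtracted from the stack pointer and r29 is rewritten from the result, which is what the two CHECK lines pin down. A reduced sketch (assumed names):

  define void @f(i32 %n) {
    %x = alloca i8, i32 %n      ; dynamic size: lowered to sub(r29,rN); r29 = rN
    call void @use(i8* %x)
    ret void
  }
  declare void @use(i8*)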
diff --git a/test/CodeGen/Hexagon/stack-alloca2.ll b/test/CodeGen/Hexagon/stack-alloca2.ll
index ad5e13166aa..b211be0c0ff 100644
--- a/test/CodeGen/Hexagon/stack-alloca2.ll
+++ b/test/CodeGen/Hexagon/stack-alloca2.ll
@@ -1,8 +1,8 @@
; RUN: llc -O0 -march=hexagon < %s | FileCheck %s
-; CHECK-DAG: r[[AP:[0-9]+]] = and(r30, #-32)
-; CHECK-DAG: r1 = add(r[[AP]], #-32)
+; CHECK-DAG: r[[AP:[0-9]+]] = and(r30,#-32)
+; CHECK-DAG: r1 = add(r[[AP]],#-32)
-; CHECK-DAG: sub(r29, r[[SP:[0-9]+]])
+; CHECK-DAG: sub(r29,r[[SP:[0-9]+]])
; CHECK-DAG: r29 = r[[SP]]
target triple = "hexagon-unknown-unknown"
diff --git a/test/CodeGen/Hexagon/store-shift.ll b/test/CodeGen/Hexagon/store-shift.ll
index 866930990ba..981071a0181 100644
--- a/test/CodeGen/Hexagon/store-shift.ll
+++ b/test/CodeGen/Hexagon/store-shift.ll
@@ -1,12 +1,12 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
; CHECK-DAG: r[[BASE:[0-9]+]] += add
-; CHECK-DAG: r[[IDX0:[0-9]+]] = add(r2, #5)
-; CHECK-DAG: r[[IDX1:[0-9]+]] = add(r2, #6)
-; CHECK-DAG: memw(r0 + r[[IDX0]]<<#2) = r3
-; CHECK-DAG: memw(r0 + r[[IDX1]]<<#2) = r3
-; CHECK-DAG: memw(r[[BASE]] + r[[IDX0]]<<#2) = r[[IDX0]]
-; CHECK-DAG: memw(r[[BASE]] + r[[IDX1]]<<#2) = r[[IDX0]]
+; CHECK-DAG: r[[IDX0:[0-9]+]] = add(r2,#5)
+; CHECK-DAG: r[[IDX1:[0-9]+]] = add(r2,#6)
+; CHECK-DAG: memw(r0+r[[IDX0]]<<#2) = r3
+; CHECK-DAG: memw(r0+r[[IDX1]]<<#2) = r3
+; CHECK-DAG: memw(r[[BASE]]+r[[IDX0]]<<#2) = r[[IDX0]]
+; CHECK-DAG: memw(r[[BASE]]+r[[IDX1]]<<#2) = r[[IDX0]]
target triple = "hexagon"
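The memw(Rs+Rt<<#2) form folds a word-scaled index into the store itself, so the add feeding the address only has to produce the index. A reduced sketch (assumed names) of one of the stores:

  define void @f(i32* %p, i32 %i, i32 %v) {
    %j = add i32 %i, 5                   ; expected: add(r1,#5)
    %q = getelementptr i32, i32* %p, i32 %j
    store i32 %v, i32* %q, align 4       ; expected: memw(r0+r{{.*}}<<#2) = r2
    ret void
  }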
diff --git a/test/CodeGen/Hexagon/sube.ll b/test/CodeGen/Hexagon/sube.ll
index 7bc00759303..861f361a2c5 100644
--- a/test/CodeGen/Hexagon/sube.ll
+++ b/test/CodeGen/Hexagon/sube.ll
@@ -1,13 +1,13 @@
; RUN: llc -march=hexagon -disable-hsdr -hexagon-expand-condsets=0 -hexagon-bit=0 -disable-post-ra < %s | FileCheck %s
-; CHECK: r{{[0-9]+:[0-9]+}} = combine(#0, #0)
-; CHECK: r{{[0-9]+:[0-9]+}} = combine(#0, #1)
-; CHECK: p{{[0-9]+}} = cmp.gtu(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
-; CHECK: r{{[0-9]+:[0-9]+}} = sub(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
-; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+:[0-9]+}} = sub(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
-; CHECK: r{{[0-9]+:[0-9]+}} = combine(r{{[0-9]+}}, r{{[0-9]+}})
+; CHECK: r{{[0-9]+:[0-9]+}} = combine(#0,#0)
+; CHECK: r{{[0-9]+:[0-9]+}} = combine(#0,#1)
+; CHECK: p{{[0-9]+}} = cmp.gtu(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}})
+; CHECK: r{{[0-9]+:[0-9]+}} = sub(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}})
+; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}},r{{[0-9]+}},r{{[0-9]+}})
+; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}},r{{[0-9]+}},r{{[0-9]+}})
+; CHECK: r{{[0-9]+:[0-9]+}} = sub(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}})
+; CHECK: r{{[0-9]+:[0-9]+}} = combine(r{{[0-9]+}},r{{[0-9]+}})
define void @check_sube_subc(i64 %AL, i64 %AH, i64 %BL, i64 %BH, i64* %RL, i64* %RH) {
entry:
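The CHECK sequence is standard two-limb subtraction with borrow: the combine lines materialize constants, cmp.gtu computes the borrow out of the low halves, and the muxes select the borrow-adjusted high result. Schematically, for the 128-bit difference A - B split into 64-bit limbs:

  RL     = AL - BL
  borrow = (AL <u BL)        ; i.e. cmp.gtu(BL,AL)
  RH     = AH - BH - borrow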
diff --git a/test/CodeGen/Hexagon/subi-asl.ll b/test/CodeGen/Hexagon/subi-asl.ll
index f0b27e828f5..d7610ceb62a 100644
--- a/test/CodeGen/Hexagon/subi-asl.ll
+++ b/test/CodeGen/Hexagon/subi-asl.ll
@@ -3,11 +3,11 @@
; Check if S4_subi_asl_ri is being generated correctly.
; CHECK-LABEL: yes_sub_asl
-; CHECK: [[REG1:(r[0-9]+)]] = sub(#0, asl([[REG1]], #1))
+; CHECK: [[REG1:(r[0-9]+)]] = sub(#0,asl([[REG1]],#1))
; CHECK-LABEL: no_sub_asl
-; CHECK: [[REG2:(r[0-9]+)]] = asl(r{{[0-9]+}}, #1)
-; CHECK: r{{[0-9]+}} = sub([[REG2]], r{{[0-9]+}})
+; CHECK: [[REG2:(r[0-9]+)]] = asl(r{{[0-9]+}},#1)
+; CHECK: r{{[0-9]+}} = sub([[REG2]],r{{[0-9]+}})
%struct.rtx_def = type { i16, i8 }
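S4_subi_asl_ri is the fused form Rx = sub(#u8,asl(Rx,#U5)); it needs an immediate minuend and the shifted register tied to the destination, which is why only the first function folds. A reduced sketch of the foldable shape:

  define i32 @yes(i32 %x) {
    %s = shl i32 %x, 1
    %r = sub i32 0, %s       ; immediate minuend: folds to sub(#0,asl(r,#1))
    ret i32 %r
  }

In no_sub_asl the minuend is a register, so the shift and subtract stay separate.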
diff --git a/test/CodeGen/Hexagon/swp-const-tc.ll b/test/CodeGen/Hexagon/swp-const-tc.ll
index 3113094d2ba..c07d23623eb 100644
--- a/test/CodeGen/Hexagon/swp-const-tc.ll
+++ b/test/CodeGen/Hexagon/swp-const-tc.ll
@@ -4,7 +4,7 @@
; of computing a new LC0 value.
; CHECK-LABEL: @test
-; CHECK: loop0(.LBB0_1, #998)
+; CHECK: loop0(.LBB0_1,#998)
define i32 @test(i32* %A, i32* %B, i32 %count) {
entry:
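The loop0/endloop0 pair removes the compare-and-branch from the loop body: loop0(label,#n) programs hardware loop 0 with a start address and trip count, and the packet tagged endloop0 closes it. With a compile-time trip count the pipeliner can emit the adjusted constant (#998 here) directly rather than computing a new LC0 value. Roughly (illustrative shape, not the exact output):

      loop0(.LBB0_1,#998)      // start address + trip count
  .LBB0_1:
      { ... pipelined kernel ... }
      { ... } :endloop0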
diff --git a/test/CodeGen/Hexagon/swp-matmul-bitext.ll b/test/CodeGen/Hexagon/swp-matmul-bitext.ll
index db5bb96d0bc..9c425ae6a09 100644
--- a/test/CodeGen/Hexagon/swp-matmul-bitext.ll
+++ b/test/CodeGen/Hexagon/swp-matmul-bitext.ll
@@ -11,7 +11,7 @@
; CHECK: [[REG0:(r[0-9]+)]] = memh
; CHECK: [[REG1:(r[0-9]+)]] = memh
; CHECK: += mpyi
-; CHECK: [[REG2]] = mpyi([[REG0]], [[REG1]])
+; CHECK: [[REG2]] = mpyi([[REG0]],[[REG1]])
; CHECK: endloop0
%union_h2_sem_t = type { i32 }
diff --git a/test/CodeGen/Hexagon/swp-max.ll b/test/CodeGen/Hexagon/swp-max.ll
index 038138ff256..26238ea6fb3 100644
--- a/test/CodeGen/Hexagon/swp-max.ll
+++ b/test/CodeGen/Hexagon/swp-max.ll
@@ -15,8 +15,8 @@ for.body.preheader:
; CHECK: loop0(.LBB0_[[LOOP:.]],
; CHECK: .LBB0_[[LOOP]]:
-; CHECK: [[REG1:(r[0-9]+)]] = max(r{{[0-9]+}}, [[REG1]])
-; CHECK: [[REG0:(r[0-9]+)]] = add([[REG2:(r[0-9]+)]], [[REG0]])
+; CHECK: [[REG1:(r[0-9]+)]] = max(r{{[0-9]+}},[[REG1]])
+; CHECK: [[REG0:(r[0-9]+)]] = add([[REG2:(r[0-9]+)]],[[REG0]])
; CHECK: [[REG2]] = memw
; CHECK: endloop0
diff --git a/test/CodeGen/Hexagon/swp-multi-loops.ll b/test/CodeGen/Hexagon/swp-multi-loops.ll
index 56e8c651100..fc2576af8ac 100644
--- a/test/CodeGen/Hexagon/swp-multi-loops.ll
+++ b/test/CodeGen/Hexagon/swp-multi-loops.ll
@@ -5,15 +5,15 @@
; Check if the first loop is pipelined.
; CHECK: loop0(.LBB0_[[LOOP:.]],
; CHECK: .LBB0_[[LOOP]]:
-; CHECK: add(r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK-NEXT: memw(r{{[0-9]+}}{{.*}}++{{.*}}#4)
+; CHECK: add(r{{[0-9]+}},r{{[0-9]+}})
+; CHECK-NEXT: memw(r{{[0-9]+}}++#4)
; CHECK-NEXT: endloop0
; Check if the second loop is pipelined.
; CHECK: loop0(.LBB0_[[LOOP:.]],
; CHECK: .LBB0_[[LOOP]]:
-; CHECK: add(r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK-NEXT: memw(r{{[0-9]+}}{{.*}}++{{.*}}#4)
+; CHECK: add(r{{[0-9]+}},r{{[0-9]+}})
+; CHECK-NEXT: memw(r{{[0-9]+}}++#4)
; CHECK-NEXT: endloop0
define i32 @test(i32* %a, i32 %n, i32 %l) {
diff --git a/test/CodeGen/Hexagon/swp-stages4.ll b/test/CodeGen/Hexagon/swp-stages4.ll
index cdd09845ba5..f58e8320315 100644
--- a/test/CodeGen/Hexagon/swp-stages4.ll
+++ b/test/CodeGen/Hexagon/swp-stages4.ll
@@ -6,8 +6,8 @@
; CHECK: = and
; CHECK: = and
; CHECK: = and
-; CHECK: [[REG0:(r[0-9]+)]] = and([[REG1:(r[0-9]+)]], #255)
-; CHECK-NOT: [[REG0]] = and([[REG1]], #255)
+; CHECK: [[REG0:(r[0-9]+)]] = and([[REG1:(r[0-9]+)]],#255)
+; CHECK-NOT: [[REG0]] = and([[REG1]],#255)
; CHECK: loop0(.LBB0_[[LOOP:.]],
; CHECK: .LBB0_[[LOOP]]:
; CHECK: [[REG0]] += add
diff --git a/test/CodeGen/Hexagon/swp-stages5.ll b/test/CodeGen/Hexagon/swp-stages5.ll
index f83aa32ae0a..fdfb2101cd3 100644
--- a/test/CodeGen/Hexagon/swp-stages5.ll
+++ b/test/CodeGen/Hexagon/swp-stages5.ll
@@ -7,7 +7,7 @@
; CHECK-DAG: [[REG0:(r[0-9]+)]] = memub(r{{[0-9]+}}++#1)
; CHECK-DAG: loop0(.LBB0_[[LOOP:.]],
; CHECK: .LBB0_[[LOOP]]:
-; CHECK: = and([[REG0]], #255)
+; CHECK: = and([[REG0]],#255)
; CHECK: [[REG0]]{{[:0-9]*}} =
; CHECK: endloop
diff --git a/test/CodeGen/Hexagon/swp-vmult.ll b/test/CodeGen/Hexagon/swp-vmult.ll
index 9018405274c..7c53248f47f 100644
--- a/test/CodeGen/Hexagon/swp-vmult.ll
+++ b/test/CodeGen/Hexagon/swp-vmult.ll
@@ -2,10 +2,10 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 -O3 < %s | FileCheck %s
; Multiply and accumulate
-; CHECK: mpyi([[REG0:r([0-9]+)]], [[REG1:r([0-9]+)]])
-; CHECK-NEXT: add(r{{[0-9]+}}, #4)
-; CHECK-NEXT: [[REG0]] = memw(r{{[0-9]+}} + r{{[0-9]+}}<<#0)
-; CHECK-NEXT: [[REG1]] = memw(r{{[0-9]+}} + r{{[0-9]+}}<<#0)
+; CHECK: mpyi([[REG0:r([0-9]+)]],[[REG1:r([0-9]+)]])
+; CHECK-NEXT: add(r{{[0-9]+}},#4)
+; CHECK-NEXT: [[REG0]] = memw(r{{[0-9]+}}+r{{[0-9]+}}<<#0)
+; CHECK-NEXT: [[REG1]] = memw(r{{[0-9]+}}+r{{[0-9]+}}<<#0)
; CHECK-NEXT: endloop0
define i32 @foo(i32* %a, i32* %b, i32 %n) {
diff --git a/test/CodeGen/Hexagon/swp-vsum.ll b/test/CodeGen/Hexagon/swp-vsum.ll
index 4756c644709..3561997450d 100644
--- a/test/CodeGen/Hexagon/swp-vsum.ll
+++ b/test/CodeGen/Hexagon/swp-vsum.ll
@@ -4,9 +4,9 @@
; Simple vector total.
; CHECK: loop0(.LBB0_[[LOOP:.]],
; CHECK: .LBB0_[[LOOP]]:
-; CHECK: add([[REG:r([0-9]+)]], r{{[0-9]+}})
-; CHECK-NEXT: add(r{{[0-9]+}}, #4)
-; CHECK-NEXT: [[REG]] = memw(r{{[0-9]+}} + r{{[0-9]+}}<<#0)
+; CHECK: add([[REG:r([0-9]+)]],r{{[0-9]+}})
+; CHECK-NEXT: add(r{{[0-9]+}},#4)
+; CHECK-NEXT: [[REG]] = memw(r{{[0-9]+}}+r{{[0-9]+}}<<#0)
; CHECK-NEXT: endloop0
define i32 @foo(i32* %a, i32 %n) {
diff --git a/test/CodeGen/Hexagon/tail-dup-subreg-map.ll b/test/CodeGen/Hexagon/tail-dup-subreg-map.ll
index 08dadeb9aaa..1b11d087832 100644
--- a/test/CodeGen/Hexagon/tail-dup-subreg-map.ll
+++ b/test/CodeGen/Hexagon/tail-dup-subreg-map.ll
@@ -5,7 +5,7 @@
; subregisters were dropped by the tail duplicator, resulting in invalid
; COPY instructions being generated.
-; CHECK: = extractu(r{{[0-9]+}}, #15, #17)
+; CHECK: = extractu(r{{[0-9]+}},#15,#17)
target triple = "hexagon"
diff --git a/test/CodeGen/Hexagon/tfr-to-combine.ll b/test/CodeGen/Hexagon/tfr-to-combine.ll
index 1b82f3e4562..50879ffe582 100644
--- a/test/CodeGen/Hexagon/tfr-to-combine.ll
+++ b/test/CodeGen/Hexagon/tfr-to-combine.ll
@@ -8,7 +8,7 @@
; Function Attrs: nounwind
define i64 @test1() #0 {
-; CHECK: combine(#10, #0)
+; CHECK: combine(#10,#0)
entry:
store i16 0, i16* @a, align 2
store i16 10, i16* @b, align 2
@@ -17,7 +17,7 @@ entry:
; Function Attrs: nounwind
define i64 @test2() #0 {
-; CHECK: combine(#0, r{{[0-9]+}})
+; CHECK: combine(#0,r{{[0-9]+}})
entry:
store i16 0, i16* @a, align 2
%0 = load i16, i16* @c, align 2
@@ -27,7 +27,7 @@ entry:
; Function Attrs: nounwind
define i64 @test4() #0 {
-; CHECK: combine(#0, #100)
+; CHECK: combine(#0,#100)
entry:
store i16 100, i16* @b, align 2
store i16 0, i16* @a, align 2
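combine(#hi,#lo) writes both halves of a 64-bit register pair in one instruction, with the first operand going to the high word; combine(#10,#0) therefore materializes the i64 value (10 << 32) | 0 without two separate transfers, which is the point of the tfr-to-combine transformation.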
diff --git a/test/CodeGen/Hexagon/tls_pic.ll b/test/CodeGen/Hexagon/tls_pic.ll
index 190e1d71d39..2c2be0dc384 100644
--- a/test/CodeGen/Hexagon/tls_pic.ll
+++ b/test/CodeGen/Hexagon/tls_pic.ll
@@ -4,7 +4,7 @@
@src_ie = thread_local(initialexec) global i32 0, align 4
; CHECK-LABEL: test_initial_exec
-; CHECK-DAG: = add(pc, ##_GLOBAL_OFFSET_TABLE_@PCREL)
+; CHECK-DAG: = add(pc,##_GLOBAL_OFFSET_TABLE_@PCREL)
; CHECK-DAG: = ##src_ie@IEGOT
; CHECK-DAG: = ##dst_ie@IEGOT
; CHECK-NOT: call
@@ -22,7 +22,7 @@ entry:
; general-dynamic model.
; CHECK-LABEL: test_dynamic
-; CHECK-DAG: = add(pc, ##_GLOBAL_OFFSET_TABLE_@PCREL)
+; CHECK-DAG: = add(pc,##_GLOBAL_OFFSET_TABLE_@PCREL)
; CHECK-DAG: = ##src_gd@GDGOT
; CHECK-DAG: = ##dst_gd@GDGOT
; CHECK-DAG: call src_gd@GDPLT
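The two TLS models differ in how the thread-local address is obtained: initial-exec loads a fixed offset from the GOT (the @IEGOT slots) and adds it to the thread pointer with no call, while general-dynamic must call the resolver through @GDPLT. A reduced sketch of the initial-exec side (assumed names):

  @t = thread_local(initialexec) global i32 0, align 4
  define i32 @get_t() {
    %v = load i32, i32* @t
    ret i32 %v
  }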
diff --git a/test/CodeGen/Hexagon/two-crash.ll b/test/CodeGen/Hexagon/two-crash.ll
index 0ab02cda8a0..7e79cb3be91 100644
--- a/test/CodeGen/Hexagon/two-crash.ll
+++ b/test/CodeGen/Hexagon/two-crash.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
; This testcase crashed, because we propagated a reg:sub into a tied use.
; The two-address pass rewrote it in a way that generated incorrect code.
-; CHECK: r{{[0-9]+}} += lsr(r{{[0-9]+}}, #16)
+; CHECK: r{{[0-9]+}} += lsr(r{{[0-9]+}},#16)
target triple = "hexagon"
diff --git a/test/CodeGen/Hexagon/vaddh.ll b/test/CodeGen/Hexagon/vaddh.ll
index 88194b750ad..a4fb33de4ac 100644
--- a/test/CodeGen/Hexagon/vaddh.ll
+++ b/test/CodeGen/Hexagon/vaddh.ll
@@ -1,5 +1,5 @@
; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
-; CHECK: vaddh(r{{[0-9]+}}, r{{[0-9]+}})
+; CHECK: vaddh(r{{[0-9]+}},r{{[0-9]+}})
@j = external global i32
@k = external global i32
diff --git a/test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll b/test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll
index 70c4aeb4bac..4bba134a40c 100644
--- a/test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll
+++ b/test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 -disable-hsdr < %s | FileCheck %s
; This one should generate a combine with two immediates.
-; CHECK: combine(#7, #7)
+; CHECK: combine(#7,#7)
@B = common global [400 x i32] zeroinitializer, align 8
@A = common global [400 x i32] zeroinitializer, align 8
@C = common global [400 x i32] zeroinitializer, align 8
diff --git a/test/CodeGen/Hexagon/vect/vect-loadv4i16.ll b/test/CodeGen/Hexagon/vect/vect-loadv4i16.ll
index 91b32652400..f49a1e24a1b 100644
--- a/test/CodeGen/Hexagon/vect/vect-loadv4i16.ll
+++ b/test/CodeGen/Hexagon/vect/vect-loadv4i16.ll
@@ -1,8 +1,8 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 -disable-hsdr < %s | FileCheck %s
; Check the halfword loads, the register combine, and the vector add.
-; CHECK: memuh(r{{[0-9]+}} + {{ *}}#6{{ *}})
-; CHECK: combine(r{{[0-9]+}}{{ *}},{{ *}}r{{[0-9]+}}{{ *}})
+; CHECK: memuh(r{{[0-9]+}}+#6)
+; CHECK: combine(r{{[0-9]+}},r{{[0-9]+}})
; CHECK: vaddh
target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
diff --git a/test/CodeGen/Hexagon/vect/vect-shift-imm.ll b/test/CodeGen/Hexagon/vect/vect-shift-imm.ll
index 4861181d412..a4d6afa40bc 100644
--- a/test/CodeGen/Hexagon/vect/vect-shift-imm.ll
+++ b/test/CodeGen/Hexagon/vect/vect-shift-imm.ll
@@ -6,12 +6,12 @@
; RUN: llc -march=hexagon < %s | FileCheck %s --check-prefix=CHECK-LSRH
;
; Make sure that the instructions with immediate operands are generated.
-; CHECK-ASLW: vaslw({{.*}}, #9)
-; CHECK-ASRW: vasrw({{.*}}, #8)
-; CHECK-LSRW: vlsrw({{.*}}, #7)
-; CHECK-ASLH: vaslh({{.*}}, #6)
-; CHECK-ASRH: vasrh({{.*}}, #5)
-; CHECK-LSRH: vlsrh({{.*}}, #4)
+; CHECK-ASLW: vaslw({{.*}},#9)
+; CHECK-ASRW: vasrw({{.*}},#8)
+; CHECK-LSRW: vlsrw({{.*}},#7)
+; CHECK-ASLH: vaslh({{.*}},#6)
+; CHECK-ASRH: vasrh({{.*}},#5)
+; CHECK-LSRH: vlsrh({{.*}},#4)
target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
target triple = "hexagon"
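The RUN lines each select one prefix, so a single input checks all six immediate-shift forms. A reduced sketch (assumed names) of the IR shape behind the word variants:

  define <2 x i32> @asr8(<2 x i32> %v) {
    %r = ashr <2 x i32> %v, <i32 8, i32 8>   ; expected: vasrw({{.*}},#8)
    ret <2 x i32> %r
  }

with lshr and shl giving the vlsrw/vaslw forms, and <4 x i16> shifts giving the halfword variants.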
diff --git a/test/CodeGen/Hexagon/vect/vect-vshifts.ll b/test/CodeGen/Hexagon/vect/vect-vshifts.ll
index 49ff812601a..9d3cbe6e113 100644
--- a/test/CodeGen/Hexagon/vect/vect-vshifts.ll
+++ b/test/CodeGen/Hexagon/vect/vect-vshifts.ll
@@ -1,8 +1,8 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that the vector shifts use register shift amounts.
-; CHECK: r{{[0-9]+:[0-9]+}} = vasrw(r{{[0-9]+:[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+:[0-9]+}} = vaslw(r{{[0-9]+:[0-9]+}}, r{{[0-9]+}})
+; CHECK: r{{[0-9]+:[0-9]+}} = vasrw(r{{[0-9]+:[0-9]+}},r{{[0-9]+}})
+; CHECK: r{{[0-9]+:[0-9]+}} = vaslw(r{{[0-9]+:[0-9]+}},r{{[0-9]+}})
target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
target triple = "hexagon"
diff --git a/test/CodeGen/Hexagon/vect/vect-xor.ll b/test/CodeGen/Hexagon/vect/vect-xor.ll
index 96719e68341..8864ab5c5cb 100644
--- a/test/CodeGen/Hexagon/vect/vect-xor.ll
+++ b/test/CodeGen/Hexagon/vect/vect-xor.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 -disable-hsdr < %s | FileCheck %s
; Check that the parsing succeeded.
-; CHECK: r{{[0-9]+:[0-9]+}} = xor(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
+; CHECK: r{{[0-9]+:[0-9]+}} = xor(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}})
target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
target triple = "hexagon"