Diffstat (limited to 'test')
-rw-r--r--  test/CodeGen/AArch64/addsub.ll                   |  7
-rw-r--r--  test/CodeGen/AArch64/arm64-atomic.ll             | 22
-rw-r--r--  test/CodeGen/AArch64/arm64-ccmp.ll               |  8
-rw-r--r--  test/CodeGen/AArch64/arm64-shrink-wrapping.ll    | 14
-rw-r--r--  test/CodeGen/AArch64/compare-branch.ll           |  2
-rw-r--r--  test/CodeGen/AArch64/logical_shifted_reg.ll      |  2
-rw-r--r--  test/CodeGen/AArch64/tbz-tbnz.ll                 | 18
-rw-r--r--  test/CodeGen/AMDGPU/branch-relaxation.ll         | 16
-rw-r--r--  test/CodeGen/AMDGPU/si-annotate-cf-noloop.ll     |  5
-rw-r--r--  test/CodeGen/AMDGPU/skip-if-dead.ll              | 17
-rw-r--r--  test/CodeGen/AMDGPU/uniform-cfg.ll               | 32
-rw-r--r--  test/CodeGen/ARM/arm-and-tst-peephole.ll         |  6
-rw-r--r--  test/CodeGen/ARM/atomic-op.ll                    |  4
-rw-r--r--  test/CodeGen/ARM/atomic-ops-v8.ll                | 35
-rw-r--r--  test/CodeGen/ARM/cmpxchg-weak.ll                 |  8
-rw-r--r--  test/CodeGen/ARM/machine-cse-cmp.ll              |  2
-rw-r--r--  test/CodeGen/Mips/brconeq.ll                     |  4
-rw-r--r--  test/CodeGen/Mips/brconeqk.ll                    |  4
-rw-r--r--  test/CodeGen/Mips/brcongt.ll                     |  4
-rw-r--r--  test/CodeGen/Mips/brconlt.ll                     |  4
-rw-r--r--  test/CodeGen/Mips/brconnez.ll                    |  4
-rw-r--r--  test/CodeGen/Mips/llvm-ir/ashr.ll                | 14
-rw-r--r--  test/CodeGen/Mips/micromips-compact-branches.ll  |  3
-rw-r--r--  test/CodeGen/PowerPC/misched-inorder-latency.ll  |  4
-rw-r--r--  test/CodeGen/PowerPC/tail-dup-break-cfg.ll       | 59
-rw-r--r--  test/CodeGen/PowerPC/tail-dup-layout.ll          | 54
-rwxr-xr-x  test/CodeGen/SPARC/sjlj.ll                       |  9
-rw-r--r--  test/CodeGen/SystemZ/asm-18.ll                   |  4
-rw-r--r--  test/CodeGen/SystemZ/cond-store-01.ll            | 10
-rw-r--r--  test/CodeGen/SystemZ/cond-store-02.ll            | 10
-rw-r--r--  test/CodeGen/SystemZ/cond-store-03.ll            | 10
-rw-r--r--  test/CodeGen/SystemZ/cond-store-04.ll            | 10
-rw-r--r--  test/CodeGen/SystemZ/cond-store-05.ll            |  5
-rw-r--r--  test/CodeGen/SystemZ/cond-store-06.ll            |  5
-rw-r--r--  test/CodeGen/SystemZ/int-cmp-37.ll               | 24
-rw-r--r--  test/CodeGen/SystemZ/int-cmp-40.ll               | 20
-rw-r--r--  test/CodeGen/SystemZ/int-cmp-44.ll               |  6
-rw-r--r--  test/CodeGen/SystemZ/int-cmp-48.ll               | 26
-rw-r--r--  test/CodeGen/SystemZ/tdc-06.ll                   | 14
-rw-r--r--  test/CodeGen/Thumb/thumb-shrink-wrapping.ll      | 11
-rw-r--r--  test/CodeGen/Thumb2/cbnz.ll                      |  2
-rw-r--r--  test/CodeGen/Thumb2/ifcvt-compare.ll             |  2
-rw-r--r--  test/CodeGen/Thumb2/v8_IT_4.ll                   |  5
-rw-r--r--  test/CodeGen/WebAssembly/phi.ll                  |  5
-rw-r--r--  test/CodeGen/X86/2008-11-29-ULT-Sign.ll          |  4
-rw-r--r--  test/CodeGen/X86/add.ll                          |  6
-rw-r--r--  test/CodeGen/X86/avx-splat.ll                    |  7
-rw-r--r--  test/CodeGen/X86/avx512-cmp.ll                   |  5
-rw-r--r--  test/CodeGen/X86/bt.ll                           | 10
-rw-r--r--  test/CodeGen/X86/critical-edge-split-2.ll        |  5
-rw-r--r--  test/CodeGen/X86/fp-une-cmp.ll                   |  4
-rw-r--r--  test/CodeGen/X86/jump_sign.ll                    |  4
-rw-r--r--  test/CodeGen/X86/machine-cse.ll                  |  4
-rw-r--r--  test/CodeGen/X86/shift-double.ll                 | 24
-rw-r--r--  test/CodeGen/X86/sink-hoist.ll                   |  3
-rw-r--r--  test/CodeGen/X86/sse-scalar-fp-arith.ll          | 16
-rw-r--r--  test/CodeGen/X86/testb-je-fusion.ll              |  3
57 files changed, 205 insertions, 420 deletions
diff --git a/test/CodeGen/AArch64/addsub.ll b/test/CodeGen/AArch64/addsub.ll
index 6adeeeec296..c0235cd5d9e 100644
--- a/test/CodeGen/AArch64/addsub.ll
+++ b/test/CodeGen/AArch64/addsub.ll
@@ -140,17 +140,12 @@ test4:
test5:
; CHECK: cmn {{w[0-9]+}}, #444
-; CHECK: b.le [[TEST6:.?LBB[0-9]+_[0-9]+]]
+; CHECK: b.gt [[RET]]
%newval5 = add i32 %val, 4
store i32 %newval5, i32* @var_i32
%cmp_neg_uge = icmp sgt i32 %val2, -444
br i1 %cmp_neg_uge, label %ret, label %test6
-; CHECK: {{^}}[[RET]]:
-; CHECK: ret
-; CHECK: {{^}}[[TEST6]]:
-; CHECK: ret
-
test6:
%newval6 = add i32 %val, 5
store i32 %newval6, i32* @var_i32
diff --git a/test/CodeGen/AArch64/arm64-atomic.ll b/test/CodeGen/AArch64/arm64-atomic.ll
index 2c9a3bbaa50..c87103481ad 100644
--- a/test/CodeGen/AArch64/arm64-atomic.ll
+++ b/test/CodeGen/AArch64/arm64-atomic.ll
@@ -9,10 +9,10 @@ define i32 @val_compare_and_swap(i32* %p, i32 %cmp, i32 %new) #0 {
; CHECK-NEXT: b.ne [[FAILBB:.?LBB[0-9_]+]]
; CHECK-NEXT: stxr [[SCRATCH_REG:w[0-9]+]], w2, [x[[ADDR]]]
; CHECK-NEXT: cbnz [[SCRATCH_REG]], [[TRYBB]]
-; CHECK-NEXT: ret
+; CHECK-NEXT: b [[EXITBB:.?LBB[0-9_]+]]
; CHECK-NEXT: [[FAILBB]]:
; CHECK-NEXT: clrex
-; CHECK-NEXT: ret
+; CHECK-NEXT: [[EXITBB]]:
%pair = cmpxchg i32* %p, i32 %cmp, i32 %new acquire acquire
%val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
@@ -27,12 +27,10 @@ define i32 @val_compare_and_swap_from_load(i32* %p, i32 %cmp, i32* %pnew) #0 {
; CHECK-NEXT: b.ne [[FAILBB:.?LBB[0-9_]+]]
; CHECK-NEXT: stxr [[SCRATCH_REG:w[0-9]+]], [[NEW]], [x0]
; CHECK-NEXT: cbnz [[SCRATCH_REG]], [[TRYBB]]
-; CHECK-NEXT: mov x0, x[[ADDR]]
-; CHECK-NEXT: ret
+; CHECK-NEXT: b [[EXITBB:.?LBB[0-9_]+]]
; CHECK-NEXT: [[FAILBB]]:
; CHECK-NEXT: clrex
-; CHECK-NEXT: mov x0, x[[ADDR]]
-; CHECK-NEXT: ret
+; CHECK-NEXT: [[EXITBB]]:
%new = load i32, i32* %pnew
%pair = cmpxchg i32* %p, i32 %cmp, i32 %new acquire acquire
%val = extractvalue { i32, i1 } %pair, 0
@@ -43,15 +41,15 @@ define i32 @val_compare_and_swap_rel(i32* %p, i32 %cmp, i32 %new) #0 {
; CHECK-LABEL: val_compare_and_swap_rel:
; CHECK-NEXT: mov x[[ADDR:[0-9]+]], x0
; CHECK-NEXT: [[TRYBB:.?LBB[0-9_]+]]:
-; CHECK-NEXT: ldaxr [[RESULT:w[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: ldaxr [[RESULT:w[0-9]+]], [x[[ADDR]]
; CHECK-NEXT: cmp [[RESULT]], w1
; CHECK-NEXT: b.ne [[FAILBB:.?LBB[0-9_]+]]
-; CHECK-NEXT: stlxr [[SCRATCH_REG:w[0-9]+]], w2, [x[[ADDR]]]
+; CHECK-NEXT: stlxr [[SCRATCH_REG:w[0-9]+]], w2, [x[[ADDR]]
; CHECK-NEXT: cbnz [[SCRATCH_REG]], [[TRYBB]]
-; CHECK-NEXT: ret
+; CHECK-NEXT: b [[EXITBB:.?LBB[0-9_]+]]
; CHECK-NEXT: [[FAILBB]]:
; CHECK-NEXT: clrex
-; CHECK-NEXT: ret
+; CHECK-NEXT: [[EXITBB]]:
%pair = cmpxchg i32* %p, i32 %cmp, i32 %new acq_rel monotonic
%val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
@@ -66,10 +64,10 @@ define i64 @val_compare_and_swap_64(i64* %p, i64 %cmp, i64 %new) #0 {
; CHECK-NEXT: b.ne [[FAILBB:.?LBB[0-9_]+]]
; CHECK-NEXT: stxr [[SCRATCH_REG:w[0-9]+]], x2, [x[[ADDR]]]
; CHECK-NEXT: cbnz [[SCRATCH_REG]], [[TRYBB]]
-; CHECK-NEXT: ret
+; CHECK-NEXT: b [[EXITBB:.?LBB[0-9_]+]]
; CHECK-NEXT: [[FAILBB]]:
; CHECK-NEXT: clrex
-; CHECK-NEXT: ret
+; CHECK-NEXT: [[EXITBB]]:
%pair = cmpxchg i64* %p, i64 %cmp, i64 %new monotonic monotonic
%val = extractvalue { i64, i1 } %pair, 0
ret i64 %val
diff --git a/test/CodeGen/AArch64/arm64-ccmp.ll b/test/CodeGen/AArch64/arm64-ccmp.ll
index 89cdc377d05..2682fa7dcce 100644
--- a/test/CodeGen/AArch64/arm64-ccmp.ll
+++ b/test/CodeGen/AArch64/arm64-ccmp.ll
@@ -108,10 +108,10 @@ if.end: ; preds = %if.then, %lor.lhs.f
; CHECK: cmp w0, #1
; CHECK: sdiv [[DIVRES:w[0-9]+]], w1, w0
; CHECK: ccmp [[DIVRES]], #16, #0, ge
-; CHECK: b.le [[BLOCK:LBB[0-9_]+]]
-; CHECK: orr w0, wzr, #0x7
-; CHECK: [[BLOCK]]:
+; CHECK: b.gt [[BLOCK:LBB[0-9_]+]]
; CHECK: bl _foo
+; CHECK: [[BLOCK]]:
+; CHECK: orr w0, wzr, #0x7
define i32 @speculate_division(i32 %a, i32 %b) nounwind ssp {
entry:
%cmp = icmp sgt i32 %a, 0
@@ -135,7 +135,7 @@ if.end:
; CHECK: cmp
; CHECK-NOT: b.
; CHECK: fccmp {{.*}}, #8, ge
-; CHECK: b.ge
+; CHECK: b.lt
define i32 @single_fcmp(i32 %a, float %b) nounwind ssp {
entry:
%cmp = icmp sgt i32 %a, 0
diff --git a/test/CodeGen/AArch64/arm64-shrink-wrapping.ll b/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
index 4df220eddbb..255cd8e4a0d 100644
--- a/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
+++ b/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
@@ -346,15 +346,19 @@ entry:
; CHECK-NEXT: sub w1, w1, #1
; CHECK-NEXT: add [[SUM]], [[SUM]], [[VA_VAL]]
; CHECK-NEXT: cbnz w1, [[LOOP_LABEL]]
-; CHECK-NEXT: [[IFEND_LABEL]]:
+; DISABLE-NEXT: b [[IFEND_LABEL]]
+;
+; DISABLE: [[ELSE_LABEL]]: ; %if.else
+; DISABLE: lsl w0, w1, #1
+;
+; CHECK: [[IFEND_LABEL]]:
; Epilogue code.
; CHECK: add sp, sp, #16
; CHECK-NEXT: ret
;
-; CHECK: [[ELSE_LABEL]]: ; %if.else
-; CHECK-NEXT: lsl w0, w1, #1
-; DISABLE-NEXT: add sp, sp, #16
-; CHECK-NEXT: ret
+; ENABLE: [[ELSE_LABEL]]: ; %if.else
+; ENABLE-NEXT: lsl w0, w1, #1
+; ENABLE-NEXT: ret
define i32 @variadicFunc(i32 %cond, i32 %count, ...) #0 {
entry:
%ap = alloca i8*, align 8
diff --git a/test/CodeGen/AArch64/compare-branch.ll b/test/CodeGen/AArch64/compare-branch.ll
index adbad58b7db..50631445122 100644
--- a/test/CodeGen/AArch64/compare-branch.ll
+++ b/test/CodeGen/AArch64/compare-branch.ll
@@ -27,7 +27,7 @@ test4:
%val4 = load volatile i64, i64* @var64
%tst4 = icmp ne i64 %val4, 0
br i1 %tst4, label %end, label %test5, !prof !1
-; CHECK: cbz {{x[0-9]+}}, .LBB
+; CHECK: cbnz {{x[0-9]+}}, .LBB
test5:
store volatile i64 %val4, i64* @var64
diff --git a/test/CodeGen/AArch64/logical_shifted_reg.ll b/test/CodeGen/AArch64/logical_shifted_reg.ll
index d1ac718a34a..1c15f1521c5 100644
--- a/test/CodeGen/AArch64/logical_shifted_reg.ll
+++ b/test/CodeGen/AArch64/logical_shifted_reg.ll
@@ -210,7 +210,7 @@ test2:
test3:
; CHECK: tst {{x[0-9]+}}, {{x[0-9]+}}, asr #12
-; CHECK: b.le .L
+; CHECK: b.gt .L
%asr_op = ashr i64 %val2, 12
%asr_and = and i64 %asr_op, %val1
%tst3 = icmp sgt i64 %asr_and, 0
diff --git a/test/CodeGen/AArch64/tbz-tbnz.ll b/test/CodeGen/AArch64/tbz-tbnz.ll
index 0e58e36b510..0dd265c18ec 100644
--- a/test/CodeGen/AArch64/tbz-tbnz.ll
+++ b/test/CodeGen/AArch64/tbz-tbnz.ll
@@ -10,7 +10,7 @@ entry:
br i1 %cmp, label %if.then, label %if.end
; CHECK: sub [[CMP:w[0-9]+]], w0, #12
-; CHECK: tbnz [[CMP]], #31
+; CHECK: tbz [[CMP]], #31
if.then:
call void @t()
@@ -28,7 +28,7 @@ entry:
br i1 %cmp, label %if.then, label %if.end
; CHECK: sub [[CMP:x[0-9]+]], x0, #12
-; CHECK: tbnz [[CMP]], #63
+; CHECK: tbz [[CMP]], #63
if.then:
call void @t()
@@ -118,7 +118,7 @@ entry:
br i1 %cmp, label %if.then, label %if.end
; CHECK: sub [[CMP:w[0-9]+]], w0, #12
-; CHECK: tbnz [[CMP]], #31
+; CHECK: tbz [[CMP]], #31
if.then:
call void @t()
@@ -178,7 +178,7 @@ define void @test9(i64 %val1) {
br i1 %tst, label %if.then, label %if.end
; CHECK-NOT: cmp
-; CHECK: tbnz x0, #63
+; CHECK: tbz x0, #63
if.then:
call void @t()
@@ -194,7 +194,7 @@ define void @test10(i64 %val1) {
br i1 %tst, label %if.then, label %if.end
; CHECK-NOT: cmp
-; CHECK: tbnz x0, #63
+; CHECK: tbz x0, #63
if.then:
call void @t()
@@ -209,7 +209,7 @@ define void @test11(i64 %val1, i64* %ptr) {
; CHECK: ldr [[CMP:x[0-9]+]], [x1]
; CHECK-NOT: cmp
-; CHECK: tbnz [[CMP]], #63
+; CHECK: tbz [[CMP]], #63
%val = load i64, i64* %ptr
%tst = icmp slt i64 %val, 0
@@ -229,7 +229,7 @@ define void @test12(i64 %val1) {
br i1 %tst, label %if.then, label %if.end
; CHECK-NOT: cmp
-; CHECK: tbnz x0, #63
+; CHECK: tbz x0, #63
if.then:
call void @t()
@@ -247,7 +247,7 @@ define void @test13(i64 %val1, i64 %val2) {
; CHECK: orr [[CMP:x[0-9]+]], x0, x1
; CHECK-NOT: cmp
-; CHECK: tbnz [[CMP]], #63
+; CHECK: tbz [[CMP]], #63
if.then:
call void @t()
@@ -262,7 +262,7 @@ define void @test14(i1 %cond) {
br i1 %cond, label %if.end, label %if.then
; CHECK-NOT: and
-; CHECK: tbz w0, #0
+; CHECK: tbnz w0, #0
if.then:
call void @t()
diff --git a/test/CodeGen/AMDGPU/branch-relaxation.ll b/test/CodeGen/AMDGPU/branch-relaxation.ll
index 3fd40521801..39505404a86 100644
--- a/test/CodeGen/AMDGPU/branch-relaxation.ll
+++ b/test/CodeGen/AMDGPU/branch-relaxation.ll
@@ -335,12 +335,6 @@ loop:
; GCN-NEXT: ;;#ASMEND
; GCN-NEXT: [[BB3]]: ; %bb3
-; GCN-NEXT: ;;#ASMSTART
-; GCN-NEXT: v_nop_e64
-; GCN-NEXT: ;;#ASMEND
-; GCN-NEXT: ;;#ASMSTART
-; GCN-NEXT: v_nop_e64
-; GCN-NEXT: ;;#ASMEND
; GCN-NEXT: s_endpgm
define void @expand_requires_expand(i32 %cond0) #0 {
bb0:
@@ -362,12 +356,6 @@ bb2:
br label %bb3
bb3:
-; These NOPs prevent tail-duplication-based outlining
-; from firing, which defeats the need to expand the branches and this test.
- call void asm sideeffect
- "v_nop_e64", ""() #0
- call void asm sideeffect
- "v_nop_e64", ""() #0
ret void
}
@@ -397,7 +385,6 @@ bb3:
; GCN-NEXT: [[ENDIF]]: ; %endif
; GCN-NEXT: s_or_b64 exec, exec, [[MASK]]
-; GCN-NEXT: s_sleep 5
; GCN-NEXT: s_endpgm
define void @uniform_inside_divergent(i32 addrspace(1)* %out, i32 %cond) #0 {
entry:
@@ -415,9 +402,6 @@ if_uniform:
br label %endif
endif:
- ; layout can remove the split branch if it can copy the return block.
- ; This call makes the return block long enough that it doesn't get copied.
- call void @llvm.amdgcn.s.sleep(i32 5);
ret void
}
diff --git a/test/CodeGen/AMDGPU/si-annotate-cf-noloop.ll b/test/CodeGen/AMDGPU/si-annotate-cf-noloop.ll
index 574f9a64e57..ef616eb6380 100644
--- a/test/CodeGen/AMDGPU/si-annotate-cf-noloop.ll
+++ b/test/CodeGen/AMDGPU/si-annotate-cf-noloop.ll
@@ -37,10 +37,7 @@ bb5: ; preds = %bb3, %bb1
; OPT-NOT: call i1 @llvm.amdgcn.loop
; GCN-LABEL: {{^}}annotate_ret_noloop:
-; GCN: s_cbranch_scc0 [[BODY:BB[0-9]+_[0-9]+]]
-; GCN: s_endpgm
-
-; GCN: {{^}}[[BODY]]:
+; GCN: s_cbranch_scc1
; GCN: s_endpgm
; GCN: .Lfunc_end1
define void @annotate_ret_noloop(<4 x float> addrspace(1)* noalias nocapture readonly %arg) #0 {
diff --git a/test/CodeGen/AMDGPU/skip-if-dead.ll b/test/CodeGen/AMDGPU/skip-if-dead.ll
index 23b67534440..60cee7a3499 100644
--- a/test/CodeGen/AMDGPU/skip-if-dead.ll
+++ b/test/CodeGen/AMDGPU/skip-if-dead.ll
@@ -263,25 +263,18 @@ exit:
; CHECK-NEXT: s_endpgm
; CHECK: [[KILLBB:BB[0-9]+_[0-9]+]]:
-; CHECK-NEXT: s_cbranch_scc1 [[BB8:BB[0-9]+_[0-9]+]]
+; CHECK-NEXT: s_cbranch_scc0 [[PHIBB:BB[0-9]+_[0-9]+]]
+; CHECK: [[PHIBB]]:
; CHECK: v_cmp_eq_f32_e32 vcc, 0, [[PHIREG]]
-; CHECK-NEXT: s_cbranch_vccnz [[BB10:BB[0-9]+_[0-9]+]]
-; CHECK-NEXT: s_branch [[END:BB[0-9]+_[0-9]+]]
+; CHECK-NEXT: s_cbranch_vccz [[ENDBB:BB[0-9]+_[0-9]+]]
-; CHECK [[BB8]]: ; %BB8
-; CHECK: v_mov_b32_e32 v{{[0-9]+}}, 8
-; CHECK: buffer_store_dword
-; CHECK: v_cmp_eq_f32_e32 vcc, 0, [[PHIREG]]
-; CHECK-NEXT: s_cbranch_vccz [[END]]
-
-; CHECK: [[BB10]]: ; %bb10
+; CHECK: ; %bb10
; CHECK: v_mov_b32_e32 v{{[0-9]+}}, 9
; CHECK: buffer_store_dword
-; CHECK: [[END:BB[0-9]+_[0-9]+]]: ; %end
+; CHECK: [[ENDBB]]:
; CHECK-NEXT: s_endpgm
-
define amdgpu_ps void @phi_use_def_before_kill() #0 {
bb:
%tmp = fadd float undef, 1.000000e+00
diff --git a/test/CodeGen/AMDGPU/uniform-cfg.ll b/test/CodeGen/AMDGPU/uniform-cfg.ll
index c0d0a750b27..a0060bd368b 100644
--- a/test/CodeGen/AMDGPU/uniform-cfg.ll
+++ b/test/CodeGen/AMDGPU/uniform-cfg.ll
@@ -252,11 +252,9 @@ ENDIF: ; preds = %IF, %main_body
; GCN: s_cmp_lt_i32 [[COND]], 1
; GCN: s_cbranch_scc1 [[EXIT:[A-Za-z0-9_]+]]
; GCN: v_cmp_gt_i32_e64 vcc, [[COND]], 0{{$}}
-; GCN: s_cbranch_vccz [[BODY:[A-Za-z0-9_]+]]
-; GCN: {{^}}[[EXIT]]:
-; GCN: s_endpgm
-; GCN: {{^}}[[BODY]]:
+; GCN: s_cbranch_vccnz [[EXIT]]
; GCN: buffer_store
+; GCN: {{^}}[[EXIT]]:
; GCN: s_endpgm
define void @icmp_users_different_blocks(i32 %cond0, i32 %cond1, i32 addrspace(1)* %out) {
bb:
@@ -304,10 +302,9 @@ done:
; GCN: v_cmp_gt_u32_e32 vcc, 16, v{{[0-9]+}}
; GCN: s_and_saveexec_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], vcc
; GCN: s_xor_b64 [[MASK1:s\[[0-9]+:[0-9]+\]]], exec, [[MASK]]
+; GCN: s_cbranch_execz [[ENDIF_LABEL:[0-9_A-Za-z]+]]
; GCN: s_cmp_lg_u32 {{s[0-9]+}}, 0
-; GCN: s_cbranch_scc0 [[IF_UNIFORM_LABEL:[A-Z0-9_a-z]+]]
-; GCN: s_endpgm
-; GCN: {{^}}[[IF_UNIFORM_LABEL]]:
+; GCN: s_cbranch_scc1 [[ENDIF_LABEL]]
; GCN: v_mov_b32_e32 [[ONE:v[0-9]+]], 1
; GCN: buffer_store_dword [[ONE]]
define void @uniform_inside_divergent(i32 addrspace(1)* %out, i32 %cond) {
@@ -331,13 +328,14 @@ endif:
; GCN-LABEL: {{^}}divergent_inside_uniform:
; GCN: s_cmp_lg_u32 s{{[0-9]+}}, 0
-; GCN: s_cbranch_scc0 [[IF_LABEL:[0-9_A-Za-z]+]]
-; GCN: [[IF_LABEL]]:
+; GCN: s_cbranch_scc1 [[ENDIF_LABEL:[0-9_A-Za-z]+]]
; GCN: v_cmp_gt_u32_e32 vcc, 16, v{{[0-9]+}}
; GCN: s_and_saveexec_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], vcc
; GCN: s_xor_b64 [[MASK1:s\[[0-9]+:[0-9]+\]]], exec, [[MASK]]
; GCN: v_mov_b32_e32 [[ONE:v[0-9]+]], 1
; GCN: buffer_store_dword [[ONE]]
+; GCN: [[ENDIF_LABEL]]:
+; GCN: s_endpgm
define void @divergent_inside_uniform(i32 addrspace(1)* %out, i32 %cond) {
entry:
%u_cmp = icmp eq i32 %cond, 0
@@ -365,11 +363,11 @@ endif:
; GCN: buffer_store_dword [[ONE]]
; GCN: s_or_b64 exec, exec, [[MASK]]
; GCN: s_cmp_lg_u32 s{{[0-9]+}}, 0
-; GCN: s_cbranch_scc0 [[IF_UNIFORM:[A-Z0-9_]+]]
-; GCN: s_endpgm
-; GCN: [[IF_UNIFORM]]:
+; GCN: s_cbranch_scc1 [[EXIT:[A-Z0-9_]+]]
; GCN: v_mov_b32_e32 [[TWO:v[0-9]+]], 2
; GCN: buffer_store_dword [[TWO]]
+; GCN: [[EXIT]]:
+; GCN: s_endpgm
define void @divergent_if_uniform_if(i32 addrspace(1)* %out, i32 %cond) {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
@@ -400,20 +398,16 @@ exit:
; GCN-LABEL: {{^}}cse_uniform_condition_different_blocks:
; GCN: s_load_dword [[COND:s[0-9]+]]
; GCN: s_cmp_lt_i32 [[COND]], 1
-; GCN: s_cbranch_scc1 [[FN:BB[0-9_]+]]
+; GCN: s_cbranch_scc1 BB[[FNNUM:[0-9]+]]_3
; GCN: BB#1:
; GCN-NOT: cmp
; GCN: buffer_load_dword
; GCN: buffer_store_dword
-; GCN: s_cbranch_scc0 [[BB7:BB[0-9_]+]]
+; GCN: s_cbranch_scc1 BB[[FNNUM]]_3
-; GCN: [[FN]]:
+; GCN: BB[[FNNUM]]_3:
; GCN: s_endpgm
-
-; GCN: [[BB7]]:
-; GCN: s_endpgm
-
define void @cse_uniform_condition_different_blocks(i32 %cond, i32 addrspace(1)* %out) {
bb:
%tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #0
diff --git a/test/CodeGen/ARM/arm-and-tst-peephole.ll b/test/CodeGen/ARM/arm-and-tst-peephole.ll
index 2fb0d231825..9bd2077e4d0 100644
--- a/test/CodeGen/ARM/arm-and-tst-peephole.ll
+++ b/test/CodeGen/ARM/arm-and-tst-peephole.ll
@@ -49,9 +49,9 @@ tailrecurse.switch: ; preds = %tailrecurse
; V8-NEXT: beq
; V8-NEXT: %tailrecurse.switch
; V8: cmp
-; V8-NEXT: beq
-; V8-NEXT: %sw.epilog
-; V8-NEXT: bx lr
+; V8-NEXT: bne
+; V8-NEXT: b
+; The trailing space in the last line checks that the branch is unconditional
switch i32 %and, label %sw.epilog [
i32 1, label %sw.bb
i32 3, label %sw.bb6
diff --git a/test/CodeGen/ARM/atomic-op.ll b/test/CodeGen/ARM/atomic-op.ll
index 23c4ccea460..e6a4949d53c 100644
--- a/test/CodeGen/ARM/atomic-op.ll
+++ b/test/CodeGen/ARM/atomic-op.ll
@@ -320,10 +320,10 @@ define i32 @test_cmpxchg_fail_order1(i32 *%addr, i32 %desired, i32 %new) {
; CHECK: strex [[SUCCESS:r[0-9]+]], r2, [r[[ADDR]]]
; CHECK: cmp [[SUCCESS]], #0
; CHECK: bne [[LOOP_BB]]
-; CHECK: dmb ish
-; CHECK: bx lr
+; CHECK: b [[END_BB:\.?LBB[0-9]+_[0-9]+]]
; CHECK: [[FAIL_BB]]:
; CHECK-NEXT: clrex
+; CHECK-NEXT: [[END_BB]]:
; CHECK: dmb ish
; CHECK: bx lr
diff --git a/test/CodeGen/ARM/atomic-ops-v8.ll b/test/CodeGen/ARM/atomic-ops-v8.ll
index d1575ed12e4..77b850bd617 100644
--- a/test/CodeGen/ARM/atomic-ops-v8.ll
+++ b/test/CodeGen/ARM/atomic-ops-v8.ll
@@ -1045,21 +1045,20 @@ define i8 @test_atomic_cmpxchg_i8(i8 zeroext %wanted, i8 zeroext %new) nounwind
; function there.
; CHECK-ARM-NEXT: cmp r[[OLD]], r0
; CHECK-THUMB-NEXT: cmp r[[OLD]], r[[WANTED]]
-; CHECK-NEXT: bne .LBB{{[0-9]+}}_4
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_3
; CHECK-NEXT: BB#2:
; As above, r1 is a reasonable guess.
; CHECK: strexb [[STATUS:r[0-9]+]], r1, [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
-; CHECK-ARM: mov r0, r[[OLD]]
-; CHECK: bx lr
-; CHECK-NEXT: .LBB{{[0-9]+}}_4:
+; CHECK-NEXT: b .LBB{{[0-9]+}}_4
+; CHECK-NEXT: .LBB{{[0-9]+}}_3:
; CHECK-NEXT: clrex
+; CHECK-NEXT: .LBB{{[0-9]+}}_4:
; CHECK-NOT: dmb
; CHECK-NOT: mcr
; CHECK-ARM: mov r0, r[[OLD]]
-; CHECK-ARM-NEXT: bx lr
ret i8 %old
}
@@ -1079,21 +1078,20 @@ define i16 @test_atomic_cmpxchg_i16(i16 zeroext %wanted, i16 zeroext %new) nounw
; function there.
; CHECK-ARM-NEXT: cmp r[[OLD]], r0
; CHECK-THUMB-NEXT: cmp r[[OLD]], r[[WANTED]]
-; CHECK-NEXT: bne .LBB{{[0-9]+}}_4
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_3
; CHECK-NEXT: BB#2:
; As above, r1 is a reasonable guess.
; CHECK: stlexh [[STATUS:r[0-9]+]], r1, [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
-; CHECK-ARM: mov r0, r[[OLD]]
-; CHECK: bx lr
-; CHECK-NEXT: .LBB{{[0-9]+}}_4:
+; CHECK-NEXT: b .LBB{{[0-9]+}}_4
+; CHECK-NEXT: .LBB{{[0-9]+}}_3:
; CHECK-NEXT: clrex
+; CHECK-NEXT: .LBB{{[0-9]+}}_4:
; CHECK-NOT: dmb
; CHECK-NOT: mcr
; CHECK-ARM: mov r0, r[[OLD]]
-; CHECK-ARM-NEXT: bx lr
ret i16 %old
}
@@ -1112,21 +1110,20 @@ define void @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: cmp r[[OLD]], r0
-; CHECK-NEXT: bne .LBB{{[0-9]+}}_4
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_3
; CHECK-NEXT: BB#2:
; As above, r1 is a reasonable guess.
; CHECK: stlex [[STATUS:r[0-9]+]], r1, [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
-; CHECK: str{{(.w)?}} r[[OLD]],
-; CHECK-NEXT: bx lr
-; CHECK-NEXT: .LBB{{[0-9]+}}_4:
+; CHECK-NEXT: b .LBB{{[0-9]+}}_4
+; CHECK-NEXT: .LBB{{[0-9]+}}_3:
; CHECK-NEXT: clrex
+; CHECK-NEXT: .LBB{{[0-9]+}}_4:
; CHECK-NOT: dmb
; CHECK-NOT: mcr
; CHECK: str{{(.w)?}} r[[OLD]],
-; CHECK-ARM-NEXT: bx lr
ret void
}
@@ -1151,16 +1148,16 @@ define void @test_atomic_cmpxchg_i64(i64 %wanted, i64 %new) nounwind {
; CHECK-BE-DAG: eor{{(\.w)?}} [[MISMATCH_LO:r[0-9]+|lr]], [[OLD1]], r0
; CHECK-ARM-BE: orrs{{(\.w)?}} {{r[0-9]+}}, [[MISMATCH_HI]], [[MISMATCH_LO]]
; CHECK-THUMB-BE: orrs{{(\.w)?}} {{(r[0-9]+, )?}}[[MISMATCH_LO]], [[MISMATCH_HI]]
-; CHECK-NEXT: bne .LBB{{[0-9]+}}_4
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_3
; CHECK-NEXT: BB#2:
; As above, r2, r3 is a reasonable guess.
; CHECK: strexd [[STATUS:r[0-9]+]], r2, r3, [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
-; CHECK: strd [[OLD1]], [[OLD2]], [r[[ADDR]]]
-; CHECK-NEXT: pop
-; CHECK-NEXT: .LBB{{[0-9]+}}_4:
+; CHECK-NEXT: b .LBB{{[0-9]+}}_4
+; CHECK-NEXT: .LBB{{[0-9]+}}_3:
; CHECK-NEXT: clrex
+; CHECK-NEXT: .LBB{{[0-9]+}}_4:
; CHECK-NOT: dmb
; CHECK-NOT: mcr
diff --git a/test/CodeGen/ARM/cmpxchg-weak.ll b/test/CodeGen/ARM/cmpxchg-weak.ll
index 0d5681aafbc..4038528c91b 100644
--- a/test/CodeGen/ARM/cmpxchg-weak.ll
+++ b/test/CodeGen/ARM/cmpxchg-weak.ll
@@ -13,16 +13,14 @@ define void @test_cmpxchg_weak(i32 *%addr, i32 %desired, i32 %new) {
; CHECK-NEXT: dmb ish
; CHECK-NEXT: strex [[SUCCESS:r[0-9]+]], r2, [r0]
; CHECK-NEXT: cmp [[SUCCESS]], #0
-; CHECK-NEXT: beq [[SUCCESSBB:LBB[0-9]+_[0-9]+]]
+; CHECK-NEXT: bne [[FAILBB:LBB[0-9]+_[0-9]+]]
; CHECK-NEXT: BB#2:
+; CHECK-NEXT: dmb ish
; CHECK-NEXT: str r3, [r0]
; CHECK-NEXT: bx lr
; CHECK-NEXT: [[LDFAILBB]]:
; CHECK-NEXT: clrex
-; CHECK-NEXT: str r3, [r0]
-; CHECK-NEXT: bx lr
-; CHECK-NEXT: [[SUCCESSBB]]:
-; CHECK-NEXT: dmb ish
+; CHECK-NEXT: [[FAILBB]]:
; CHECK-NEXT: str r3, [r0]
; CHECK-NEXT: bx lr
diff --git a/test/CodeGen/ARM/machine-cse-cmp.ll b/test/CodeGen/ARM/machine-cse-cmp.ll
index 259ebbd345e..611cba6ed1f 100644
--- a/test/CodeGen/ARM/machine-cse-cmp.ll
+++ b/test/CodeGen/ARM/machine-cse-cmp.ll
@@ -52,7 +52,7 @@ entry:
; CHECK-LABEL: f3:
; CHECK-NOT: sub
; CHECK: cmp
-; CHECK: bge
+; CHECK: blt
%0 = load i32, i32* %offset, align 4
%cmp = icmp slt i32 %0, %size
%s = sub nsw i32 %0, %size
diff --git a/test/CodeGen/Mips/brconeq.ll b/test/CodeGen/Mips/brconeq.ll
index 40c281e758a..7c3c31e0ec3 100644
--- a/test/CodeGen/Mips/brconeq.ll
+++ b/test/CodeGen/Mips/brconeq.ll
@@ -8,11 +8,11 @@ define void @test() nounwind {
entry:
%0 = load i32, i32* @i, align 4
%1 = load i32, i32* @j, align 4
- %cmp = icmp ne i32 %0, %1
+ %cmp = icmp eq i32 %0, %1
; 16: cmp ${{[0-9]+}}, ${{[0-9]+}}
; 16: bteqz $[[LABEL:[0-9A-Ba-b_]+]]
; 16: $[[LABEL]]:
- br i1 %cmp, label %if.then, label %if.end
+ br i1 %cmp, label %if.end, label %if.then
if.then: ; preds = %entry
store i32 1, i32* @result, align 4
diff --git a/test/CodeGen/Mips/brconeqk.ll b/test/CodeGen/Mips/brconeqk.ll
index b541554d548..85d257e8d79 100644
--- a/test/CodeGen/Mips/brconeqk.ll
+++ b/test/CodeGen/Mips/brconeqk.ll
@@ -6,8 +6,8 @@
define void @test() nounwind {
entry:
%0 = load i32, i32* @i, align 4
- %cmp = icmp ne i32 %0, 10
- br i1 %cmp, label %if.then, label %if.end
+ %cmp = icmp eq i32 %0, 10
+ br i1 %cmp, label %if.end, label %if.then
; 16: cmpi ${{[0-9]+}}, {{[0-9]+}}
; 16: bteqz $[[LABEL:[0-9A-Ba-b_]+]]
; 16: $[[LABEL]]:
diff --git a/test/CodeGen/Mips/brcongt.ll b/test/CodeGen/Mips/brcongt.ll
index d4d81751846..7dffdb41121 100644
--- a/test/CodeGen/Mips/brcongt.ll
+++ b/test/CodeGen/Mips/brcongt.ll
@@ -9,8 +9,8 @@ define void @test() nounwind {
entry:
%0 = load i32, i32* @i, align 4
%1 = load i32, i32* @j, align 4
- %cmp = icmp sle i32 %0, %1
- br i1 %cmp, label %if.then, label %if.end
+ %cmp = icmp sgt i32 %0, %1
+ br i1 %cmp, label %if.end, label %if.then
; 16: slt ${{[0-9]+}}, ${{[0-9]+}}
; 16: btnez $[[LABEL:[0-9A-Ba-b_]+]]
; 16: $[[LABEL]]:
diff --git a/test/CodeGen/Mips/brconlt.ll b/test/CodeGen/Mips/brconlt.ll
index 9a69b8c93ca..65f6c347b67 100644
--- a/test/CodeGen/Mips/brconlt.ll
+++ b/test/CodeGen/Mips/brconlt.ll
@@ -10,8 +10,8 @@ define void @test() nounwind {
entry:
%0 = load i32, i32* @j, align 4
%1 = load i32, i32* @i, align 4
- %cmp = icmp sge i32 %0, %1
- br i1 %cmp, label %if.then, label %if.end
+ %cmp = icmp slt i32 %0, %1
+ br i1 %cmp, label %if.end, label %if.then
; 16: slt ${{[0-9]+}}, ${{[0-9]+}}
; MM32R6: slt ${{[0-9]+}}, ${{[0-9]+}}
diff --git a/test/CodeGen/Mips/brconnez.ll b/test/CodeGen/Mips/brconnez.ll
index eafddccdd4c..27cf9e8cacb 100644
--- a/test/CodeGen/Mips/brconnez.ll
+++ b/test/CodeGen/Mips/brconnez.ll
@@ -7,7 +7,7 @@ define void @test() nounwind {
entry:
%0 = load i32, i32* @j, align 4
%cmp = icmp eq i32 %0, 0
- br i1 %cmp, label %if.then, label %if.end, !prof !1
+ br i1 %cmp, label %if.then, label %if.end
; 16: bnez ${{[0-9]+}}, $[[LABEL:[0-9A-Ba-b_]+]]
; 16: lw ${{[0-9]+}}, %got(result)(${{[0-9]+}})
@@ -21,4 +21,4 @@ if.end: ; preds = %if.then, %entry
ret void
}
-!1 = !{!"branch_weights", i32 2, i32 1}
+
diff --git a/test/CodeGen/Mips/llvm-ir/ashr.ll b/test/CodeGen/Mips/llvm-ir/ashr.ll
index 5b39ba408a0..c8d0e76f94e 100644
--- a/test/CodeGen/Mips/llvm-ir/ashr.ll
+++ b/test/CodeGen/Mips/llvm-ir/ashr.ll
@@ -91,13 +91,12 @@ entry:
; M2: sllv $[[T5:[0-9]+]], $[[T4]], $[[T3]]
; M2: or $3, $[[T3]], $[[T2]]
; M2: $[[BB0]]:
- ; M2: bnez $[[T1]], $[[BB1:BB[0-9_]+]]
- ; M2: nop
- ; M2: jr $ra
+ ; M2: beqz $[[T1]], $[[BB1:BB[0-9_]+]]
; M2: nop
+ ; M2: sra $2, $4, 31
; M2: $[[BB1]]:
; M2: jr $ra
- ; M2: sra $2, $4, 31
+ ; M2: nop
; 32R1-R5: srlv $[[T0:[0-9]+]], $5, $7
; 32R1-R5: not $[[T1:[0-9]+]], $7
@@ -178,13 +177,12 @@ entry:
; M3: dsllv $[[T7:[0-9]+]], $[[T5]], $[[T6]]
; M3: or $3, $[[T7]], $[[T4]]
; M3: [[BB0]]:
- ; M3: bnez $[[T3]], [[BB1:.LBB[0-9_]+]]
- ; M3: nop
- ; M3: jr $ra
+ ; M3: beqz $[[T3]], [[BB1:.LBB[0-9_]+]]
; M3: nop
+ ; M3: dsra $2, $4, 63
; M3: [[BB1]]:
; M3: jr $ra
- ; M3: dsra $2, $4, 63
+ ; M3: nop
; GP64-NOT-R6: dsrlv $[[T0:[0-9]+]], $5, $7
; GP64-NOT-R6: dsll $[[T1:[0-9]+]], $4, 1
diff --git a/test/CodeGen/Mips/micromips-compact-branches.ll b/test/CodeGen/Mips/micromips-compact-branches.ll
index 332cd8cd105..c689944d386 100644
--- a/test/CodeGen/Mips/micromips-compact-branches.ll
+++ b/test/CodeGen/Mips/micromips-compact-branches.ll
@@ -6,7 +6,7 @@ entry:
%x = alloca i32, align 4
%0 = load i32, i32* %x, align 4
%cmp = icmp eq i32 %0, 0
- br i1 %cmp, label %if.then, label %if.end, !prof !1
+ br i1 %cmp, label %if.then, label %if.end
if.then:
store i32 10, i32* %x, align 4
@@ -17,4 +17,3 @@ if.end:
}
; CHECK: bnezc
-!1 = !{!"branch_weights", i32 2, i32 1}
diff --git a/test/CodeGen/PowerPC/misched-inorder-latency.ll b/test/CodeGen/PowerPC/misched-inorder-latency.ll
index 26663d81f35..ded3111da97 100644
--- a/test/CodeGen/PowerPC/misched-inorder-latency.ll
+++ b/test/CodeGen/PowerPC/misched-inorder-latency.ll
@@ -17,7 +17,7 @@ entry:
%sum1 = add i32 %sumin, 1
%val1 = load i32, i32* %ptr
%p = icmp eq i32 %sumin, 0
- br i1 %p, label %true, label %end, !prof !1
+ br i1 %p, label %true, label %end
true:
%sum2 = add i32 %sum1, 1
%ptr2 = getelementptr i32, i32* %ptr, i32 1
@@ -53,5 +53,3 @@ end:
ret i32 %valmerge
}
declare void @llvm.prefetch(i8*, i32, i32, i32) nounwind
-
-!1 = !{!"branch_weights", i32 2, i32 1}
diff --git a/test/CodeGen/PowerPC/tail-dup-break-cfg.ll b/test/CodeGen/PowerPC/tail-dup-break-cfg.ll
deleted file mode 100644
index 0c95388934c..00000000000
--- a/test/CodeGen/PowerPC/tail-dup-break-cfg.ll
+++ /dev/null
@@ -1,59 +0,0 @@
-; RUN: llc -O2 -o - %s | FileCheck %s
-target datalayout = "e-m:e-i64:64-n32:64"
-target triple = "powerpc64le-grtev4-linux-gnu"
-
-; Intended layout:
-; The code for tail-duplication during layout will produce the layout:
-; test1
-; test2
-; body1 (with copy of test2)
-; body2
-; exit
-
-;CHECK-LABEL: tail_dup_break_cfg:
-;CHECK: mr [[TAGREG:[0-9]+]], 3
-;CHECK: andi. {{[0-9]+}}, [[TAGREG]], 1
-;CHECK-NEXT: bc 12, 1, [[BODY1LABEL:[._0-9A-Za-z]+]]
-;CHECK-NEXT: [[TEST2LABEL:[._0-9A-Za-z]+]]: # %test2
-;CHECK-NEXT: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30
-;CHECK-NEXT: bne 0, [[BODY2LABEL:[._0-9A-Za-z]+]]
-;CHECK-NEXT: b [[EXITLABEL:[._0-9A-Za-z]+]]
-;CHECK-NEXT: [[BODY1LABEL]]
-;CHECK: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30
-;CHECK-NEXT: beq 0, [[EXITLABEL]]
-;CHECK-NEXT: [[BODY2LABEL]]
-;CHECK: [[EXITLABEL:[._0-9A-Za-z]+]]: # %exit
-;CHECK: blr
-define void @tail_dup_break_cfg(i32 %tag) {
-entry:
- br label %test1
-test1:
- %tagbit1 = and i32 %tag, 1
- %tagbit1eq0 = icmp eq i32 %tagbit1, 0
- br i1 %tagbit1eq0, label %test2, label %body1, !prof !1 ; %test2 more likely
-body1:
- call void @a()
- call void @a()
- call void @a()
- call void @a()
- br label %test2
-test2:
- %tagbit2 = and i32 %tag, 2
- %tagbit2eq0 = icmp eq i32 %tagbit2, 0
- br i1 %tagbit2eq0, label %exit, label %body2
-body2:
- call void @b()
- call void @b()
- call void @b()
- call void @b()
- br label %exit
-exit:
- ret void
-}
-
-declare void @a()
-declare void @b()
-declare void @c()
-declare void @d()
-
-!1 = !{!"branch_weights", i32 2, i32 1}
diff --git a/test/CodeGen/PowerPC/tail-dup-layout.ll b/test/CodeGen/PowerPC/tail-dup-layout.ll
index d6e339e1a97..6790aa8e944 100644
--- a/test/CodeGen/PowerPC/tail-dup-layout.ll
+++ b/test/CodeGen/PowerPC/tail-dup-layout.ll
@@ -19,7 +19,7 @@ target triple = "powerpc64le-grtev4-linux-gnu"
; The CHECK statements check for the whole string of tests and exit block,
; and then check that the correct test has been duplicated into the end of
; the optional blocks and that the optional blocks are in the correct order.
-;CHECK-LABEL: straight_test:
+;CHECK-LABEL: f:
; test1 may have been merged with entry
;CHECK: mr [[TAGREG:[0-9]+]], 3
;CHECK: andi. {{[0-9]+}}, [[TAGREG]], 1
@@ -47,7 +47,7 @@ target triple = "powerpc64le-grtev4-linux-gnu"
;CHECK-NEXT: [[OPT4LABEL]]
;CHECK: b [[EXITLABEL]]
-define void @straight_test(i32 %tag) {
+define void @f(i32 %tag) {
entry:
br label %test1
test1:
@@ -94,57 +94,7 @@ exit:
ret void
}
-; The block then2 is not unavoidable, but since it can be tail-duplicated, it
-; should be placed as a fallthrough from test2 and copied.
-; CHECK-LABEL: avoidable_test:
-; CHECK: # %entry
-; CHECK: andi.
-; CHECK: # %test2
-; Make sure then2 falls through from test2
-; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}}
-; CHECK: # %then2
-; CHECK: rlwinm. {{[0-9]+}}, {{[0-9]+}}, 0, 29, 29
-; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}}
-; CHECK: # %end2
-; CHECK: # %else1
-; CHECK: bl a
-; CHECK: bl a
-; Make sure then2 was copied into else1
-; CHECK: rlwinm. {{[0-9]+}}, {{[0-9]+}}, 0, 29, 29
-; CHECK: # %else2
-; CHECK: bl c
-define void @avoidable_test(i32 %tag) {
-entry:
- br label %test1
-test1:
- %tagbit1 = and i32 %tag, 1
- %tagbit1eq0 = icmp eq i32 %tagbit1, 0
- br i1 %tagbit1eq0, label %test2, label %else1, !prof !1 ; %test2 more likely
-else1:
- call void @a()
- call void @a()
- br label %then2
-test2:
- %tagbit2 = and i32 %tag, 2
- %tagbit2eq0 = icmp eq i32 %tagbit2, 0
- br i1 %tagbit2eq0, label %then2, label %else2, !prof !1 ; %then2 more likely
-then2:
- %tagbit3 = and i32 %tag, 4
- %tagbit3eq0 = icmp eq i32 %tagbit3, 0
- br i1 %tagbit3eq0, label %end2, label %end1, !prof !1 ; %end2 more likely
-else2:
- call void @c()
- br label %end2
-end2:
- ret void
-end1:
- call void @d()
- ret void
-}
-
declare void @a()
declare void @b()
declare void @c()
declare void @d()
-
-!1 = !{!"branch_weights", i32 2, i32 1}
diff --git a/test/CodeGen/SPARC/sjlj.ll b/test/CodeGen/SPARC/sjlj.ll
index 647d8f2fd2c..3bf583aa475 100755
--- a/test/CodeGen/SPARC/sjlj.ll
+++ b/test/CodeGen/SPARC/sjlj.ll
@@ -66,15 +66,14 @@ return: ; preds = %if.end, %if.then
; CHECK: ba .LBB1_1
; CHECK: nop
; CHECK:.LBB1_1: ! %entry
+; CHECK: ba .LBB1_3
; CHECK: mov %g0, %i0
-; CHECK: cmp %i0, 0
-; CHECK: bne .LBB1_4
-; CHECK: ba .LBB1_5
; CHECK:.LBB1_2: ! Block address taken
; CHECK: mov 1, %i0
+; CHECK:.LBB1_3: ! %entry
+; CHECK: cmp %i0, 0
; CHECK: be .LBB1_5
-; CHECK:.LBB1_4:
-; CHECK: ba .LBB1_6
+; CHECK: nop
}
declare i8* @llvm.frameaddress(i32) #2
diff --git a/test/CodeGen/SystemZ/asm-18.ll b/test/CodeGen/SystemZ/asm-18.ll
index 7337aec5f27..7909253d188 100644
--- a/test/CodeGen/SystemZ/asm-18.ll
+++ b/test/CodeGen/SystemZ/asm-18.ll
@@ -297,7 +297,7 @@ define void @f13(i32 %x, i32 %y) {
; CHECK: iihf [[REG]], 2102030405
; CHECK: blah [[REG]]
; CHECK: br %r14
- %cmp = icmp ne i32 %x, 0
+ %cmp = icmp eq i32 %x, 0
%val = select i1 %cmp, i32 0, i32 2102030405
call void asm sideeffect "blah $0", "h"(i32 %val)
ret void
@@ -311,7 +311,7 @@ define void @f14(i32 %x, i32 %y) {
; CHECK: iilf [[REG]], 2102030405
; CHECK: blah [[REG]]
; CHECK: br %r14
- %cmp = icmp ne i32 %x, 0
+ %cmp = icmp eq i32 %x, 0
%val = select i1 %cmp, i32 0, i32 2102030405
call void asm sideeffect "blah $0", "r"(i32 %val)
ret void
diff --git a/test/CodeGen/SystemZ/cond-store-01.ll b/test/CodeGen/SystemZ/cond-store-01.ll
index 5bbfc54cf08..a682d222add 100644
--- a/test/CodeGen/SystemZ/cond-store-01.ll
+++ b/test/CodeGen/SystemZ/cond-store-01.ll
@@ -297,11 +297,8 @@ define void @f17(i64 %base, i64 %index, i8 %alt, i32 %limit) {
define void @f18(i8 *%ptr, i8 %alt, i32 %limit) {
; CHECK-LABEL: f18:
; CHECK: lb {{%r[0-5]}}, 0(%r2)
-; CHECK: {{jhe|jnhe}} [[LABEL:[^ ]*]]
-; CHECK: stc {{%r[0-5]}}, 0(%r2)
-; CHECK: br %r14
+; CHECK: {{jl|jnl}} [[LABEL:[^ ]*]]
; CHECK: [[LABEL]]:
-; CHECK: lr {{%r[0-5]}}, {{%r[0-5]}}
; CHECK: stc {{%r[0-5]}}, 0(%r2)
; CHECK: br %r14
%cond = icmp ult i32 %limit, 420
@@ -334,11 +331,8 @@ define void @f20(i8 *%ptr, i8 %alt, i32 %limit) {
; FIXME: should use a normal load instead of CS.
; CHECK-LABEL: f20:
; CHECK: lb {{%r[0-9]+}}, 0(%r2)
-; CHECK: {{jhe|jnhe}} [[LABEL:[^ ]*]]
-; CHECK: stc {{%r[0-9]+}}, 0(%r2)
-; CHECK: br %r14
+; CHECK: {{jl|jnl}} [[LABEL:[^ ]*]]
; CHECK: [[LABEL]]:
-; CHECK: lr {{%r[0-5]}}, {{%r[0-5]}}
; CHECK: stc {{%r[0-9]+}}, 0(%r2)
; CHECK: br %r14
%cond = icmp ult i32 %limit, 420
diff --git a/test/CodeGen/SystemZ/cond-store-02.ll b/test/CodeGen/SystemZ/cond-store-02.ll
index 5201d411e3a..5cb024d8b4e 100644
--- a/test/CodeGen/SystemZ/cond-store-02.ll
+++ b/test/CodeGen/SystemZ/cond-store-02.ll
@@ -297,11 +297,8 @@ define void @f17(i64 %base, i64 %index, i16 %alt, i32 %limit) {
define void @f18(i16 *%ptr, i16 %alt, i32 %limit) {
; CHECK-LABEL: f18:
; CHECK: lh {{%r[0-5]}}, 0(%r2)
-; CHECK: {{jhe|jnhe}} [[LABEL:[^ ]*]]
-; CHECK: sth {{%r[0-5]}}, 0(%r2)
-; CHECK: br %r14
+; CHECK: {{jl|jnl}} [[LABEL:[^ ]*]]
; CHECK: [[LABEL]]:
-; CHECK: lr {{%r[0-5]}}, {{%r[0-5]}}
; CHECK: sth {{%r[0-5]}}, 0(%r2)
; CHECK: br %r14
%cond = icmp ult i32 %limit, 420
@@ -334,11 +331,8 @@ define void @f20(i16 *%ptr, i16 %alt, i32 %limit) {
; FIXME: should use a normal load instead of CS.
; CHECK-LABEL: f20:
; CHECK: lh {{%r[0-9]+}}, 0(%r2)
-; CHECK: {{jhe|jnhe}} [[LABEL:[^ ]*]]
-; CHECK: sth {{%r[0-9]+}}, 0(%r2)
-; CHECK: br %r14
+; CHECK: {{jl|jnl}} [[LABEL:[^ ]*]]
; CHECK: [[LABEL]]:
-; CHECK: lr {{%r[0-9]+}}, {{%r[0-9]+}}
; CHECK: sth {{%r[0-9]+}}, 0(%r2)
; CHECK: br %r14
%cond = icmp ult i32 %limit, 420
diff --git a/test/CodeGen/SystemZ/cond-store-03.ll b/test/CodeGen/SystemZ/cond-store-03.ll
index 6111b7b0abc..46cdbff312c 100644
--- a/test/CodeGen/SystemZ/cond-store-03.ll
+++ b/test/CodeGen/SystemZ/cond-store-03.ll
@@ -226,11 +226,8 @@ define void @f13(i64 %base, i64 %index, i32 %alt, i32 %limit) {
define void @f14(i32 *%ptr, i32 %alt, i32 %limit) {
; CHECK-LABEL: f14:
; CHECK: l {{%r[0-5]}}, 0(%r2)
-; CHECK: {{jhe|jnhe}} [[LABEL:[^ ]*]]
-; CHECK: st {{%r[0-5]}}, 0(%r2)
-; CHECK: br %r14
+; CHECK: {{jl|jnl}} [[LABEL:[^ ]*]]
; CHECK: [[LABEL]]:
-; CHECK: lr {{%r[0-5]}}, {{%r[0-5]}}
; CHECK: st {{%r[0-5]}}, 0(%r2)
; CHECK: br %r14
%cond = icmp ult i32 %limit, 420
@@ -263,11 +260,8 @@ define void @f16(i32 *%ptr, i32 %alt, i32 %limit) {
; FIXME: should use a normal load instead of CS.
; CHECK-LABEL: f16:
; CHECK: l {{%r[0-5]}}, 0(%r2)
-; CHECK: {{jhe|jnhe}} [[LABEL:[^ ]*]]
-; CHECK: st {{%r[0-5]}}, 0(%r2)
-; CHECK: br %r14
+; CHECK: {{jl|jnl}} [[LABEL:[^ ]*]]
; CHECK: [[LABEL]]:
-; CHECK: lr {{%r[0-5]}}, {{%r[0-5]}}
; CHECK: st {{%r[0-5]}}, 0(%r2)
; CHECK: br %r14
%cond = icmp ult i32 %limit, 420
diff --git a/test/CodeGen/SystemZ/cond-store-04.ll b/test/CodeGen/SystemZ/cond-store-04.ll
index 76c13d83417..70124f9ecee 100644
--- a/test/CodeGen/SystemZ/cond-store-04.ll
+++ b/test/CodeGen/SystemZ/cond-store-04.ll
@@ -124,11 +124,8 @@ define void @f7(i64 %base, i64 %index, i64 %alt, i32 %limit) {
define void @f8(i64 *%ptr, i64 %alt, i32 %limit) {
; CHECK-LABEL: f8:
; CHECK: lg {{%r[0-5]}}, 0(%r2)
-; CHECK: {{jhe|jnhe}} [[LABEL:[^ ]*]]
-; CHECK: stg {{%r[0-5]}}, 0(%r2)
-; CHECK: br %r14
+; CHECK: {{jl|jnl}} [[LABEL:[^ ]*]]
; CHECK: [[LABEL]]:
-; CHECK: lgr {{%r[0-5]}}, {{%r[0-5]}}
; CHECK: stg {{%r[0-5]}}, 0(%r2)
; CHECK: br %r14
%cond = icmp ult i32 %limit, 420
@@ -161,11 +158,8 @@ define void @f10(i64 *%ptr, i64 %alt, i32 %limit) {
; FIXME: should use a normal load instead of CSG.
; CHECK-LABEL: f10:
; CHECK: lg {{%r[0-5]}}, 0(%r2)
-; CHECK: {{jhe|jnhe}} [[LABEL:[^ ]*]]
-; CHECK: stg {{%r[0-5]}}, 0(%r2)
-; CHECK: br %r14
+; CHECK: {{jl|jnl}} [[LABEL:[^ ]*]]
; CHECK: [[LABEL]]:
-; CHECK: lgr {{%r[0-5]}}, {{%r[0-5]}}
; CHECK: stg {{%r[0-5]}}, 0(%r2)
; CHECK: br %r14
%cond = icmp ult i32 %limit, 420
diff --git a/test/CodeGen/SystemZ/cond-store-05.ll b/test/CodeGen/SystemZ/cond-store-05.ll
index 20c4c0fcd83..51a9f6c42ab 100644
--- a/test/CodeGen/SystemZ/cond-store-05.ll
+++ b/test/CodeGen/SystemZ/cond-store-05.ll
@@ -156,11 +156,8 @@ define void @f9(i64 %base, i64 %index, float %alt, i32 %limit) {
define void @f10(float *%ptr, float %alt, i32 %limit) {
; CHECK-LABEL: f10:
; CHECK: le {{%f[0-5]}}, 0(%r2)
-; CHECK: {{jhe|jnhe}} [[LABEL:[^ ]*]]
-; CHECK: ste {{%f[0-5]}}, 0(%r2)
-; CHECK: br %r14
+; CHECK: {{jl|jnl}} [[LABEL:[^ ]*]]
; CHECK: [[LABEL]]:
-; CHECK: ler {{%f[0-5]}}, {{%f[0-5]}}
; CHECK: ste {{%f[0-5]}}, 0(%r2)
; CHECK: br %r14
%cond = icmp ult i32 %limit, 420
diff --git a/test/CodeGen/SystemZ/cond-store-06.ll b/test/CodeGen/SystemZ/cond-store-06.ll
index 54e7f980d9b..1eac79401bd 100644
--- a/test/CodeGen/SystemZ/cond-store-06.ll
+++ b/test/CodeGen/SystemZ/cond-store-06.ll
@@ -156,11 +156,8 @@ define void @f9(i64 %base, i64 %index, double %alt, i32 %limit) {
define void @f10(double *%ptr, double %alt, i32 %limit) {
; CHECK-LABEL: f10:
; CHECK: ld {{%f[0-5]}}, 0(%r2)
-; CHECK: {{jhe|jnhe}} [[LABEL:[^ ]*]]
-; CHECK: std {{%f[0-5]}}, 0(%r2)
-; CHECK: br %r14
+; CHECK: {{jl|jnl}} [[LABEL:[^ ]*]]
; CHECK: [[LABEL]]:
-; CHECK: ldr {{%f[0-5]}}, {{%f[0-5]}}
; CHECK: std {{%f[0-5]}}, 0(%r2)
; CHECK: br %r14
%cond = icmp ult i32 %limit, 420
diff --git a/test/CodeGen/SystemZ/int-cmp-37.ll b/test/CodeGen/SystemZ/int-cmp-37.ll
index 0843ddc4faf..aabb8a2fd3e 100644
--- a/test/CodeGen/SystemZ/int-cmp-37.ll
+++ b/test/CodeGen/SystemZ/int-cmp-37.ll
@@ -15,8 +15,8 @@ define i32 @f1(i32 %src1) {
entry:
%val = load i16 , i16 *@g
%src2 = zext i16 %val to i32
- %cond = icmp uge i32 %src1, %src2
- br i1 %cond, label %mulb, label %exit
+ %cond = icmp ult i32 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
mulb:
%mul = mul i32 %src1, %src1
br label %exit
@@ -34,8 +34,8 @@ define i32 @f2(i32 %src1) {
entry:
%val = load i16 , i16 *@g
%src2 = zext i16 %val to i32
- %cond = icmp sge i32 %src1, %src2
- br i1 %cond, label %mulb, label %exit
+ %cond = icmp slt i32 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
mulb:
%mul = mul i32 %src1, %src1
br label %exit
@@ -54,8 +54,8 @@ define i32 @f3(i32 %src1) {
entry:
%val = load i16 , i16 *@g
%src2 = zext i16 %val to i32
- %cond = icmp ne i32 %src1, %src2
- br i1 %cond, label %mulb, label %exit
+ %cond = icmp eq i32 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
mulb:
%mul = mul i32 %src1, %src1
br label %exit
@@ -74,8 +74,8 @@ define i32 @f4(i32 %src1) {
entry:
%val = load i16 , i16 *@g
%src2 = zext i16 %val to i32
- %cond = icmp eq i32 %src1, %src2
- br i1 %cond, label %mulb, label %exit
+ %cond = icmp ne i32 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
mulb:
%mul = mul i32 %src1, %src1
br label %exit
@@ -95,8 +95,8 @@ define i32 @f5(i32 %src1) {
entry:
%val = load i16 , i16 *@h, align 1
%src2 = zext i16 %val to i32
- %cond = icmp uge i32 %src1, %src2
- br i1 %cond, label %mulb, label %exit
+ %cond = icmp ult i32 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
mulb:
%mul = mul i32 %src1, %src1
br label %exit
@@ -115,8 +115,8 @@ define i32 @f6(i32 %src2) {
entry:
%val = load i16 , i16 *@g
%src1 = zext i16 %val to i32
- %cond = icmp uge i32 %src1, %src2
- br i1 %cond, label %mulb, label %exit
+ %cond = icmp ult i32 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
mulb:
%mul = mul i32 %src2, %src2
br label %exit
diff --git a/test/CodeGen/SystemZ/int-cmp-40.ll b/test/CodeGen/SystemZ/int-cmp-40.ll
index 7e743603590..fc38940ce39 100644
--- a/test/CodeGen/SystemZ/int-cmp-40.ll
+++ b/test/CodeGen/SystemZ/int-cmp-40.ll
@@ -15,8 +15,8 @@ define i64 @f1(i64 %src1) {
entry:
%val = load i16 , i16 *@g
%src2 = zext i16 %val to i64
- %cond = icmp uge i64 %src1, %src2
- br i1 %cond, label %mulb, label %exit
+ %cond = icmp ult i64 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
mulb:
%mul = mul i64 %src1, %src1
br label %exit
@@ -54,8 +54,8 @@ define i64 @f3(i64 %src1) {
entry:
%val = load i16 , i16 *@g
%src2 = zext i16 %val to i64
- %cond = icmp ne i64 %src1, %src2
- br i1 %cond, label %mulb, label %exit
+ %cond = icmp eq i64 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
mulb:
%mul = mul i64 %src1, %src1
br label %exit
@@ -74,8 +74,8 @@ define i64 @f4(i64 %src1) {
entry:
%val = load i16 , i16 *@g
%src2 = zext i16 %val to i64
- %cond = icmp eq i64 %src1, %src2
- br i1 %cond, label %mulb, label %exit
+ %cond = icmp ne i64 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
mulb:
%mul = mul i64 %src1, %src1
br label %exit
@@ -95,8 +95,8 @@ define i64 @f5(i64 %src1) {
entry:
%val = load i16 , i16 *@h, align 1
%src2 = zext i16 %val to i64
- %cond = icmp uge i64 %src1, %src2
- br i1 %cond, label %mulb, label %exit
+ %cond = icmp ult i64 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
mulb:
%mul = mul i64 %src1, %src1
br label %exit
@@ -115,8 +115,8 @@ define i64 @f6(i64 %src2) {
entry:
%val = load i16 , i16 *@g
%src1 = zext i16 %val to i64
- %cond = icmp uge i64 %src1, %src2
- br i1 %cond, label %mulb, label %exit
+ %cond = icmp ult i64 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
mulb:
%mul = mul i64 %src2, %src2
br label %exit
diff --git a/test/CodeGen/SystemZ/int-cmp-44.ll b/test/CodeGen/SystemZ/int-cmp-44.ll
index 85a8788a3bd..1b9a4ae353f 100644
--- a/test/CodeGen/SystemZ/int-cmp-44.ll
+++ b/test/CodeGen/SystemZ/int-cmp-44.ll
@@ -473,8 +473,8 @@ entry:
%xor = xor i32 %val, 1
%add = add i32 %xor, 1000000
call void @foo()
- %cmp = icmp eq i32 %add, 0
- br i1 %cmp, label %store, label %exit, !prof !1
+ %cmp = icmp ne i32 %add, 0
+ br i1 %cmp, label %exit, label %store
store:
store i32 %add, i32 *%ptr
@@ -888,5 +888,3 @@ store:
exit:
ret i64 %res
}
-
-!1 = !{!"branch_weights", i32 2, i32 1}
diff --git a/test/CodeGen/SystemZ/int-cmp-48.ll b/test/CodeGen/SystemZ/int-cmp-48.ll
index 59aed82e363..2a6d9d5fcaf 100644
--- a/test/CodeGen/SystemZ/int-cmp-48.ll
+++ b/test/CodeGen/SystemZ/int-cmp-48.ll
@@ -52,7 +52,7 @@ exit:
define double @f3(i8 *%src, double %a, double %b) {
; CHECK-LABEL: f3:
; CHECK: tm 0(%r2), 1
-; CHECK: jne {{\.L.*}}
+; CHECK: je {{\.L.*}}
; CHECK: br %r14
%byte = load i8 , i8 *%src
%and = and i8 %byte, 1
@@ -80,7 +80,7 @@ define double @f4(i8 *%src, double %a, double %b) {
define double @f5(i8 *%src, double %a, double %b) {
; CHECK-LABEL: f5:
; CHECK: tm 0(%r2), 1
-; CHECK: je {{\.L.*}}
+; CHECK: jne {{\.L.*}}
; CHECK: br %r14
%byte = load i8 , i8 *%src
%and = and i8 %byte, 1
@@ -93,7 +93,7 @@ define double @f5(i8 *%src, double %a, double %b) {
define double @f6(i8 *%src, double %a, double %b) {
; CHECK-LABEL: f6:
; CHECK: tm 0(%r2), 254
-; CHECK: jno {{\.L.*}}
+; CHECK: jo {{\.L.*}}
; CHECK: br %r14
%byte = load i8 , i8 *%src
%and = and i8 %byte, 254
@@ -106,7 +106,7 @@ define double @f6(i8 *%src, double %a, double %b) {
define double @f7(i8 *%src, double %a, double %b) {
; CHECK-LABEL: f7:
; CHECK: tm 0(%r2), 254
-; CHECK: jo {{\.L.*}}
+; CHECK: jno {{\.L.*}}
; CHECK: br %r14
%byte = load i8 , i8 *%src
%and = and i8 %byte, 254
@@ -121,7 +121,7 @@ define double @f8(i8 *%src, double %a, double %b) {
; CHECK-LABEL: f8:
; CHECK: llc [[REG:%r[0-5]]], 0(%r2)
; CHECK: tmll [[REG]], 3
-; CHECK: jnh {{\.L.*}}
+; CHECK: jh {{\.L.*}}
; CHECK: br %r14
%byte = load i8 , i8 *%src
%and = and i8 %byte, 3
@@ -135,7 +135,7 @@ define double @f9(i8 *%src, double %a, double %b) {
; CHECK-LABEL: f9:
; CHECK: llc [[REG:%r[0-5]]], 0(%r2)
; CHECK: tmll [[REG]], 3
-; CHECK: jnl {{\.L.*}}
+; CHECK: jl {{\.L.*}}
; CHECK: br %r14
%byte = load i8 , i8 *%src
%and = and i8 %byte, 3
@@ -148,7 +148,7 @@ define double @f9(i8 *%src, double %a, double %b) {
define double @f10(i8 *%src, double %a, double %b) {
; CHECK-LABEL: f10:
; CHECK: tm 4095(%r2), 1
-; CHECK: jne {{\.L.*}}
+; CHECK: je {{\.L.*}}
; CHECK: br %r14
%ptr = getelementptr i8, i8 *%src, i64 4095
%byte = load i8 , i8 *%ptr
@@ -162,7 +162,7 @@ define double @f10(i8 *%src, double %a, double %b) {
define double @f11(i8 *%src, double %a, double %b) {
; CHECK-LABEL: f11:
; CHECK: tmy 4096(%r2), 1
-; CHECK: jne {{\.L.*}}
+; CHECK: je {{\.L.*}}
; CHECK: br %r14
%ptr = getelementptr i8, i8 *%src, i64 4096
%byte = load i8 , i8 *%ptr
@@ -176,7 +176,7 @@ define double @f11(i8 *%src, double %a, double %b) {
define double @f12(i8 *%src, double %a, double %b) {
; CHECK-LABEL: f12:
; CHECK: tmy 524287(%r2), 1
-; CHECK: jne {{\.L.*}}
+; CHECK: je {{\.L.*}}
; CHECK: br %r14
%ptr = getelementptr i8, i8 *%src, i64 524287
%byte = load i8 , i8 *%ptr
@@ -191,7 +191,7 @@ define double @f13(i8 *%src, double %a, double %b) {
; CHECK-LABEL: f13:
; CHECK: agfi %r2, 524288
; CHECK: tm 0(%r2), 1
-; CHECK: jne {{\.L.*}}
+; CHECK: je {{\.L.*}}
; CHECK: br %r14
%ptr = getelementptr i8, i8 *%src, i64 524288
%byte = load i8 , i8 *%ptr
@@ -205,7 +205,7 @@ define double @f13(i8 *%src, double %a, double %b) {
define double @f14(i8 *%src, double %a, double %b) {
; CHECK-LABEL: f14:
; CHECK: tmy -524288(%r2), 1
-; CHECK: jne {{\.L.*}}
+; CHECK: je {{\.L.*}}
; CHECK: br %r14
%ptr = getelementptr i8, i8 *%src, i64 -524288
%byte = load i8 , i8 *%ptr
@@ -220,7 +220,7 @@ define double @f15(i8 *%src, double %a, double %b) {
; CHECK-LABEL: f15:
; CHECK: agfi %r2, -524289
; CHECK: tm 0(%r2), 1
-; CHECK: jne {{\.L.*}}
+; CHECK: je {{\.L.*}}
; CHECK: br %r14
%ptr = getelementptr i8, i8 *%src, i64 -524289
%byte = load i8 , i8 *%ptr
@@ -234,7 +234,7 @@ define double @f15(i8 *%src, double %a, double %b) {
define double @f16(i8 *%src, i64 %index, double %a, double %b) {
; CHECK-LABEL: f16:
; CHECK: tm 0({{%r[1-5]}}), 1
-; CHECK: jne {{\.L.*}}
+; CHECK: je {{\.L.*}}
; CHECK: br %r14
%ptr = getelementptr i8, i8 *%src, i64 %index
%byte = load i8 , i8 *%ptr
diff --git a/test/CodeGen/SystemZ/tdc-06.ll b/test/CodeGen/SystemZ/tdc-06.ll
index 14545f25cf2..a099c00d227 100644
--- a/test/CodeGen/SystemZ/tdc-06.ll
+++ b/test/CodeGen/SystemZ/tdc-06.ll
@@ -26,27 +26,25 @@ nonzero:
nonzeroord:
; CHECK: lhi %r2, 2
; CHECK: tcdb %f0, 48
-; CHECK: je [[FINITE:.]]
+; CHECK: jl [[RET]]
%abs = tail call double @llvm.fabs.f64(double %x)
%testinf = fcmp oeq double %abs, 0x7FF0000000000000
br i1 %testinf, label %ret, label %finite, !prof !1
-ret:
-; CHECK: [[RET]]:
-; CHECK: br %r14
- %res = phi i32 [ 5, %entry ], [ 1, %nonzero ], [ 2, %nonzeroord ], [ %finres, %finite ]
- ret i32 %res
-
finite:
; CHECK: lhi %r2, 3
; CHECK: tcdb %f0, 831
; CHECK: blr %r14
; CHECK: lhi %r2, 4
-; CHECK: br %r14
%testnormal = fcmp uge double %abs, 0x10000000000000
%finres = select i1 %testnormal, i32 3, i32 4
br label %ret
+ret:
+; CHECK: [[RET]]:
+; CHECK: br %r14
+ %res = phi i32 [ 5, %entry ], [ 1, %nonzero ], [ 2, %nonzeroord ], [ %finres, %finite ]
+ ret i32 %res
}
!1 = !{!"branch_weights", i32 1, i32 1}
diff --git a/test/CodeGen/Thumb/thumb-shrink-wrapping.ll b/test/CodeGen/Thumb/thumb-shrink-wrapping.ll
index f6c137b9e41..6114b72569e 100644
--- a/test/CodeGen/Thumb/thumb-shrink-wrapping.ll
+++ b/test/CodeGen/Thumb/thumb-shrink-wrapping.ll
@@ -1,12 +1,11 @@
-; RUN: llc %s -o - -enable-shrink-wrap=true -ifcvt-fn-start=1 -ifcvt-fn-stop=0 -tail-dup-placement=0 -mtriple=thumb-macho \
+; RUN: llc %s -o - -enable-shrink-wrap=true -ifcvt-fn-start=1 -ifcvt-fn-stop=0 -mtriple=thumb-macho \
; RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=ENABLE --check-prefix=ENABLE-V4T
-; RUN: llc %s -o - -enable-shrink-wrap=true -ifcvt-fn-start=1 -ifcvt-fn-stop=0 -tail-dup-placement=0 -mtriple=thumbv5-macho \
+; RUN: llc %s -o - -enable-shrink-wrap=true -ifcvt-fn-start=1 -ifcvt-fn-stop=0 -mtriple=thumbv5-macho \
; RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=ENABLE --check-prefix=ENABLE-V5T
-; RUN: llc %s -o - -enable-shrink-wrap=false -ifcvt-fn-start=1 -ifcvt-fn-stop=0 -tail-dup-placement=0 -mtriple=thumb-macho \
+; RUN: llc %s -o - -enable-shrink-wrap=false -ifcvt-fn-start=1 -ifcvt-fn-stop=0 -mtriple=thumb-macho \
; RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=DISABLE --check-prefix=DISABLE-V4T
-; RUN: llc %s -o - -enable-shrink-wrap=false -ifcvt-fn-start=1 -ifcvt-fn-stop=0 -tail-dup-placement=0 -mtriple=thumbv5-macho \
+; RUN: llc %s -o - -enable-shrink-wrap=false -ifcvt-fn-start=1 -ifcvt-fn-stop=0 -mtriple=thumbv5-macho \
; RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=DISABLE --check-prefix=DISABLE-V5T
-
;
; Note: Lots of tests use inline asm instead of regular calls.
; This allows to have a better control on what the allocation will do.
@@ -16,8 +15,6 @@
; edges.
; Also disable the late if-converter as it makes harder to reason on
; the diffs.
-; Disable tail-duplication during placement, as v4t vs v5t get different
-; results due to branches not being analyzable under v5
; Initial motivating example: Simple diamond with a call just on one side.
; CHECK-LABEL: foo:
diff --git a/test/CodeGen/Thumb2/cbnz.ll b/test/CodeGen/Thumb2/cbnz.ll
index e11c4038678..5c0bb5bfe1c 100644
--- a/test/CodeGen/Thumb2/cbnz.ll
+++ b/test/CodeGen/Thumb2/cbnz.ll
@@ -26,7 +26,7 @@ t:
call void @x()
call void @x()
call void @x()
- ; CHECK: cbz
+ ; CHECK: cbnz
%q = icmp eq i32 %y, 0
br i1 %q, label %t2, label %f
diff --git a/test/CodeGen/Thumb2/ifcvt-compare.ll b/test/CodeGen/Thumb2/ifcvt-compare.ll
index 688195f579e..7b5ce4fa3f5 100644
--- a/test/CodeGen/Thumb2/ifcvt-compare.ll
+++ b/test/CodeGen/Thumb2/ifcvt-compare.ll
@@ -4,7 +4,7 @@ declare void @x()
define void @f0(i32 %x) optsize {
; CHECK-LABEL: f0:
- ; CHECK: cbz
+ ; CHECK: cbnz
%p = icmp eq i32 %x, 0
br i1 %p, label %t, label %f
diff --git a/test/CodeGen/Thumb2/v8_IT_4.ll b/test/CodeGen/Thumb2/v8_IT_4.ll
index 5901a8e81ca..5a80d8cd7b4 100644
--- a/test/CodeGen/Thumb2/v8_IT_4.ll
+++ b/test/CodeGen/Thumb2/v8_IT_4.ll
@@ -12,11 +12,10 @@
define weak arm_aapcs_vfpcc i32 @_ZNKSs7compareERKSs(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this, %"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %__str) {
; CHECK-LABEL: _ZNKSs7compareERKSs:
-; CHECK: cbz r0,
-; CHECK-NEXT: %bb1
-; CHECK-NEXT: pop.w
+; CHECK: cbnz r0,
; CHECK-NEXT: %bb
; CHECK-NEXT: sub{{(.w)?}} r0, r{{[0-9]+}}, r{{[0-9]+}}
+; CHECK-NEXT: %bb1
; CHECK-NEXT: pop.w
entry:
%0 = tail call arm_aapcs_vfpcc i32 @_ZNKSs4sizeEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this) ; <i32> [#uses=3]
diff --git a/test/CodeGen/WebAssembly/phi.ll b/test/CodeGen/WebAssembly/phi.ll
index e25622bca43..747ae5cb15d 100644
--- a/test/CodeGen/WebAssembly/phi.ll
+++ b/test/CodeGen/WebAssembly/phi.ll
@@ -8,9 +8,8 @@ target triple = "wasm32-unknown-unknown"
; Basic phi triangle.
; CHECK-LABEL: test0:
-; CHECK: return $0
-; CHECK: div_s $push[[NUM0:[0-9]+]]=, $0, $pop[[NUM1:[0-9]+]]{{$}}
-; CHECK: return $pop[[NUM0]]{{$}}
+; CHECK: div_s $[[NUM0:[0-9]+]]=, $0, $pop[[NUM1:[0-9]+]]{{$}}
+; CHECK: return $[[NUM0]]{{$}}
define i32 @test0(i32 %p) {
entry:
%t = icmp slt i32 %p, 0
diff --git a/test/CodeGen/X86/2008-11-29-ULT-Sign.ll b/test/CodeGen/X86/2008-11-29-ULT-Sign.ll
index fa8932b5170..03442d631ac 100644
--- a/test/CodeGen/X86/2008-11-29-ULT-Sign.ll
+++ b/test/CodeGen/X86/2008-11-29-ULT-Sign.ll
@@ -4,8 +4,8 @@ target triple = "i686-pc-linux-gnu"
define i32 @a(i32 %x) nounwind {
entry:
- %cmp = icmp uge i32 %x, -2147483648 ; <i1> [#uses=1]
- br i1 %cmp, label %if.then, label %if.end
+ %cmp = icmp ult i32 %x, -2147483648 ; <i1> [#uses=1]
+ br i1 %cmp, label %if.end, label %if.then
if.then: ; preds = %entry
%call = call i32 (...) @b() ; <i32> [#uses=0]
diff --git a/test/CodeGen/X86/add.ll b/test/CodeGen/X86/add.ll
index 48f40463d0e..df1bc9b6ee7 100644
--- a/test/CodeGen/X86/add.ll
+++ b/test/CodeGen/X86/add.ll
@@ -30,8 +30,7 @@ entry:
%t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
%sum = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
- %notobit = xor i1 1, %obit
- br i1 %notobit, label %normal, label %overflow
+ br i1 %obit, label %overflow, label %normal
normal:
store i32 0, i32* %X
@@ -54,8 +53,7 @@ entry:
%t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
%sum = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
- %notobit = xor i1 1, %obit
- br i1 %notobit, label %normal, label %carry
+ br i1 %obit, label %carry, label %normal
normal:
store i32 0, i32* %X
diff --git a/test/CodeGen/X86/avx-splat.ll b/test/CodeGen/X86/avx-splat.ll
index 314b50abbb3..1914b5134be 100644
--- a/test/CodeGen/X86/avx-splat.ll
+++ b/test/CodeGen/X86/avx-splat.ll
@@ -62,10 +62,8 @@ define <8 x float> @funcE() nounwind {
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: ## implicit-def: %YMM0
; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: je LBB4_1
-; CHECK-NEXT: ## BB#2: ## %__load_and_broadcast_32.exit1249
-; CHECK-NEXT: retq
-; CHECK-NEXT: LBB4_1: ## %load.i1247
+; CHECK-NEXT: jne LBB4_2
+; CHECK-NEXT: ## BB#1: ## %load.i1247
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: andq $-32, %rsp
@@ -73,6 +71,7 @@ define <8 x float> @funcE() nounwind {
; CHECK-NEXT: vbroadcastss {{[0-9]+}}(%rsp), %ymm0
; CHECK-NEXT: movq %rbp, %rsp
; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: LBB4_2: ## %__load_and_broadcast_32.exit1249
; CHECK-NEXT: retq
allocas:
%udx495 = alloca [18 x [18 x float]], align 32
diff --git a/test/CodeGen/X86/avx512-cmp.ll b/test/CodeGen/X86/avx512-cmp.ll
index e556495bfb4..78df51be5c3 100644
--- a/test/CodeGen/X86/avx512-cmp.ll
+++ b/test/CodeGen/X86/avx512-cmp.ll
@@ -69,14 +69,13 @@ define float @test5(float %p) #0 {
; ALL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; ALL-NEXT: vucomiss %xmm1, %xmm0
; ALL-NEXT: jne LBB3_1
-; ALL-NEXT: jp LBB3_1
-; ALL-NEXT: ## BB#2: ## %return
-; ALL-NEXT: retq
+; ALL-NEXT: jnp LBB3_2
; ALL-NEXT: LBB3_1: ## %if.end
; ALL-NEXT: seta %al
; ALL-NEXT: movzbl %al, %eax
; ALL-NEXT: leaq {{.*}}(%rip), %rcx
; ALL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; ALL-NEXT: LBB3_2: ## %return
; ALL-NEXT: retq
entry:
%cmp = fcmp oeq float %p, 0.000000e+00
diff --git a/test/CodeGen/X86/bt.ll b/test/CodeGen/X86/bt.ll
index 0279928069b..6576f33a5b9 100644
--- a/test/CodeGen/X86/bt.ll
+++ b/test/CodeGen/X86/bt.ll
@@ -49,7 +49,7 @@ entry:
%tmp29 = lshr i32 %x, %n
%tmp3 = and i32 1, %tmp29
%tmp4 = icmp eq i32 %tmp3, 0
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock, !prof !1
+ br i1 %tmp4, label %bb, label %UnifiedReturnBlock
bb:
call void @foo()
@@ -89,7 +89,7 @@ entry:
%tmp29 = ashr i32 %x, %n
%tmp3 = and i32 1, %tmp29
%tmp4 = icmp eq i32 %tmp3, 0
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock, !prof !1
+ br i1 %tmp4, label %bb, label %UnifiedReturnBlock
bb:
call void @foo()
@@ -109,7 +109,7 @@ entry:
%tmp29 = shl i32 1, %n
%tmp3 = and i32 %tmp29, %x
%tmp4 = icmp eq i32 %tmp3, 0
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock, !prof !1
+ br i1 %tmp4, label %bb, label %UnifiedReturnBlock
bb:
call void @foo()
@@ -129,7 +129,7 @@ entry:
%tmp29 = shl i32 1, %n
%tmp3 = and i32 %x, %tmp29
%tmp4 = icmp eq i32 %tmp3, 0
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock, !prof !1
+ br i1 %tmp4, label %bb, label %UnifiedReturnBlock
bb:
call void @foo()
@@ -608,5 +608,3 @@ entry:
%tobool = icmp ne i64 %and1, 0
ret i1 %tobool
}
-
-!1 = !{!"branch_weights", i32 2, i32 1}
diff --git a/test/CodeGen/X86/critical-edge-split-2.ll b/test/CodeGen/X86/critical-edge-split-2.ll
index b4a808af66e..d5878bd1a74 100644
--- a/test/CodeGen/X86/critical-edge-split-2.ll
+++ b/test/CodeGen/X86/critical-edge-split-2.ll
@@ -24,7 +24,6 @@ cond.end.i: ; preds = %entry
; CHECK-LABEL: test1:
; CHECK: testb %dil, %dil
-; CHECK: je LBB0_1
-; CHECK: retq
-; CHECK: LBB0_1:
+; CHECK: jne LBB0_2
; CHECK: divl
+; CHECK: LBB0_2:
diff --git a/test/CodeGen/X86/fp-une-cmp.ll b/test/CodeGen/X86/fp-une-cmp.ll
index 1b5af5aba36..e3b2a04060b 100644
--- a/test/CodeGen/X86/fp-une-cmp.ll
+++ b/test/CodeGen/X86/fp-une-cmp.ll
@@ -36,8 +36,8 @@ define double @rdar_7859988(double %x, double %y) nounwind readnone optsize ssp
entry:
%mul = fmul double %x, %y
- %cmp = fcmp oeq double %mul, 0.000000e+00
- br i1 %cmp, label %bb1, label %bb2
+ %cmp = fcmp une double %mul, 0.000000e+00
+ br i1 %cmp, label %bb2, label %bb1
bb1:
%add = fadd double %mul, -1.000000e+00
diff --git a/test/CodeGen/X86/jump_sign.ll b/test/CodeGen/X86/jump_sign.ll
index 5d6baad7068..ca3e8bf71eb 100644
--- a/test/CodeGen/X86/jump_sign.ll
+++ b/test/CodeGen/X86/jump_sign.ll
@@ -6,7 +6,7 @@ entry:
; CHECK: jns
%tmp1 = add i32 %X, 1 ; <i32> [#uses=1]
%tmp = icmp slt i32 %tmp1, 0 ; <i1> [#uses=1]
- br i1 %tmp, label %cond_true, label %cond_next, !prof !1
+ br i1 %tmp, label %cond_true, label %cond_next
cond_true: ; preds = %entry
%tmp2 = tail call i32 (...) @bar( ) ; <i32> [#uses=0]
@@ -303,5 +303,3 @@ if.then:
if.end:
ret i32 undef
}
-
-!1 = !{!"branch_weights", i32 2, i32 1}
diff --git a/test/CodeGen/X86/machine-cse.ll b/test/CodeGen/X86/machine-cse.ll
index b6f3b454a5a..9853a7cde67 100644
--- a/test/CodeGen/X86/machine-cse.ll
+++ b/test/CodeGen/X86/machine-cse.ll
@@ -86,8 +86,8 @@ entry:
; CHECK-LABEL: cross_mbb_phys_cse:
; CHECK: cmpl
; CHECK: ja
- %cmp = icmp ule i32 %a, %b
- br i1 %cmp, label %if.end, label %return
+ %cmp = icmp ugt i32 %a, %b
+ br i1 %cmp, label %return, label %if.end
if.end: ; preds = %entry
; CHECK-NOT: cmpl
diff --git a/test/CodeGen/X86/shift-double.ll b/test/CodeGen/X86/shift-double.ll
index f0bef7ae2f3..8594c071329 100644
--- a/test/CodeGen/X86/shift-double.ll
+++ b/test/CodeGen/X86/shift-double.ll
@@ -14,13 +14,11 @@ define i64 @test1(i64 %X, i8 %C) nounwind {
; CHECK-NEXT: shll %cl, %eax
; CHECK-NEXT: shldl %cl, %esi, %edx
; CHECK-NEXT: testb $32, %cl
-; CHECK-NEXT: jne .LBB0_1
-; CHECK-NEXT: # BB#2:
-; CHECK-NEXT: popl %esi
-; CHECK-NEXT: retl
-; CHECK-NEXT: .LBB0_1:
+; CHECK-NEXT: je .LBB0_2
+; CHECK-NEXT: # BB#1:
; CHECK-NEXT: movl %eax, %edx
; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: .LBB0_2:
; CHECK-NEXT: popl %esi
; CHECK-NEXT: retl
%shift.upgrd.1 = zext i8 %C to i64 ; <i64> [#uses=1]
@@ -39,14 +37,12 @@ define i64 @test2(i64 %X, i8 %C) nounwind {
; CHECK-NEXT: sarl %cl, %edx
; CHECK-NEXT: shrdl %cl, %esi, %eax
; CHECK-NEXT: testb $32, %cl
-; CHECK-NEXT: jne .LBB1_1
-; CHECK-NEXT: # BB#2:
-; CHECK-NEXT: popl %esi
-; CHECK-NEXT: retl
-; CHECK-NEXT: .LBB1_1:
+; CHECK-NEXT: je .LBB1_2
+; CHECK-NEXT: # BB#1:
; CHECK-NEXT: sarl $31, %esi
; CHECK-NEXT: movl %edx, %eax
; CHECK-NEXT: movl %esi, %edx
+; CHECK-NEXT: .LBB1_2:
; CHECK-NEXT: popl %esi
; CHECK-NEXT: retl
%shift.upgrd.2 = zext i8 %C to i64 ; <i64> [#uses=1]
@@ -65,13 +61,11 @@ define i64 @test3(i64 %X, i8 %C) nounwind {
; CHECK-NEXT: shrl %cl, %edx
; CHECK-NEXT: shrdl %cl, %esi, %eax
; CHECK-NEXT: testb $32, %cl
-; CHECK-NEXT: jne .LBB2_1
-; CHECK-NEXT: # BB#2:
-; CHECK-NEXT: popl %esi
-; CHECK-NEXT: retl
-; CHECK-NEXT: .LBB2_1:
+; CHECK-NEXT: je .LBB2_2
+; CHECK-NEXT: # BB#1:
; CHECK-NEXT: movl %edx, %eax
; CHECK-NEXT: xorl %edx, %edx
+; CHECK-NEXT: .LBB2_2:
; CHECK-NEXT: popl %esi
; CHECK-NEXT: retl
%shift.upgrd.3 = zext i8 %C to i64 ; <i64> [#uses=1]
diff --git a/test/CodeGen/X86/sink-hoist.ll b/test/CodeGen/X86/sink-hoist.ll
index f5d470912a1..972fbdf48cb 100644
--- a/test/CodeGen/X86/sink-hoist.ll
+++ b/test/CodeGen/X86/sink-hoist.ll
@@ -26,8 +26,7 @@ define double @foo(double %x, double %y, i1 %c) nounwind {
; CHECK-LABEL: split:
; CHECK-NEXT: testb $1, %dil
-; CHECK-NEXT: jne
-; CHECK: ret
+; CHECK-NEXT: je
; CHECK: divsd
; CHECK: movapd
; CHECK: ret
diff --git a/test/CodeGen/X86/sse-scalar-fp-arith.ll b/test/CodeGen/X86/sse-scalar-fp-arith.ll
index 75212f17ab3..f711dc61574 100644
--- a/test/CodeGen/X86/sse-scalar-fp-arith.ll
+++ b/test/CodeGen/X86/sse-scalar-fp-arith.ll
@@ -1110,12 +1110,10 @@ define <4 x float> @add_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c,
; AVX1-LABEL: add_ss_mask:
; AVX1: # BB#0:
; AVX1-NEXT: testb $1, %dil
-; AVX1-NEXT: jne .LBB62_1
-; AVX1-NEXT: # BB#2:
-; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
-; AVX1-NEXT: retq
-; AVX1-NEXT: .LBB62_1:
+; AVX1-NEXT: je .LBB62_2
+; AVX1-NEXT: # BB#1:
; AVX1-NEXT: vaddss %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: .LBB62_2:
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; AVX1-NEXT: retq
;
@@ -1167,12 +1165,10 @@ define <2 x double> @add_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double>
; AVX1-LABEL: add_sd_mask:
; AVX1: # BB#0:
; AVX1-NEXT: testb $1, %dil
-; AVX1-NEXT: jne .LBB63_1
-; AVX1-NEXT: # BB#2:
-; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
-; AVX1-NEXT: retq
-; AVX1-NEXT: .LBB63_1:
+; AVX1-NEXT: je .LBB63_2
+; AVX1-NEXT: # BB#1:
; AVX1-NEXT: vaddsd %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: .LBB63_2:
; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
; AVX1-NEXT: retq
;
diff --git a/test/CodeGen/X86/testb-je-fusion.ll b/test/CodeGen/X86/testb-je-fusion.ll
index 7df4183ba08..9e946ae4ca3 100644
--- a/test/CodeGen/X86/testb-je-fusion.ll
+++ b/test/CodeGen/X86/testb-je-fusion.ll
@@ -9,7 +9,7 @@ define i32 @check_flag(i32 %flags, ...) nounwind {
entry:
%and = and i32 %flags, 512
%tobool = icmp eq i32 %and, 0
- br i1 %tobool, label %if.end, label %if.then, !prof !1
+ br i1 %tobool, label %if.end, label %if.then
if.then:
br label %if.end
@@ -18,4 +18,3 @@ if.end:
%hasflag = phi i32 [ 1, %if.then ], [ 0, %entry ]
ret i32 %hasflag
}
-!1 = !{!"branch_weights", i32 1, i32 2}