author    James Molloy <james.molloy@arm.com>  2016-09-12 14:30:48 +0000
committer James Molloy <james.molloy@arm.com>  2016-09-12 14:30:48 +0000
commit    91db09d0e862832164c6c80c4d14f471d902429f (patch)
tree      49402c35ac264883da1f8d30b0e1ab67f4124ec2 /test/CodeGen
parent    c04a5a1fc46d588a90a50d48be0aaa94c526bd9a (diff)
[Thumb] Teach ISel how to lower compares of AND bitmasks efficiently
For the common pattern (CMPZ (AND x, #bitmask), #0), we can do some more efficient instruction selection if the bitmask is one consecutive sequence of set bits (32 - clz(bm) - ctz(bm) == popcount(bm)).

1) If the bitmask touches the LSB, then we can remove all the upper bits and set the flags by doing one LSLS.
2) If the bitmask touches the MSB, then we can remove all the lower bits and set the flags with one LSRS.
3) If the bitmask has popcount == 1 (only one set bit), we can shift that bit into the sign bit with one LSLS and change the condition query from NE/EQ to MI/PL (we could also implement this by shifting into the carry bit and branching on BCC/BCS).
4) Otherwise, we can emit a sequence of LSLS+LSRS to remove the upper and lower zero bits of the mask.

1-3 require only one 16-bit instruction and can elide the CMP. 4 requires two 16-bit instructions but can elide the CMP and doesn't require materializing a complex immediate, so is also a win.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@281215 91177308-0d34-0410-b5e6-96231b3b80d8
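A minimal standalone sketch of the mask classification described above, assuming GCC/Clang bit-manipulation builtins; the enum and function names are hypothetical and this is not the actual ISel code:

#include <cstdint>

enum class MaskKind { NotContiguous, SingleBit, TouchesLSB, TouchesMSB, MiddleRun };

// Classify a bitmask from (CMPZ (AND x, #BM), #0) and report the shift
// amount(s) a Thumb1-friendly lowering would use.  Purely illustrative.
static MaskKind classifyMask(uint32_t BM, unsigned &Shift1, unsigned &Shift2) {
  if (BM == 0)
    return MaskKind::NotContiguous;        // clz/ctz are undefined for 0
  unsigned LZ = __builtin_clz(BM);         // leading zero bits
  unsigned TZ = __builtin_ctz(BM);         // trailing zero bits
  unsigned PC = __builtin_popcount(BM);    // number of set bits
  if (32 - LZ - TZ != PC)
    return MaskKind::NotContiguous;        // not one consecutive run of set bits
  if (PC == 1) { Shift1 = LZ; return MaskKind::SingleBit;  } // LSLS #LZ, test MI/PL
  if (TZ == 0) { Shift1 = LZ; return MaskKind::TouchesLSB; } // one LSLS sets the flags
  if (LZ == 0) { Shift1 = TZ; return MaskKind::TouchesMSB; } // one LSRS sets the flags
  Shift1 = LZ;                              // LSLS #LZ removes the upper zero bits,
  Shift2 = LZ + TZ;                         // LSRS #(LZ+TZ) removes the lower zero bits
  return MaskKind::MiddleRun;
}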
Diffstat (limited to 'test/CodeGen')
-rw-r--r--  test/CodeGen/ARM/and-cmpz.ll                   71
-rw-r--r--  test/CodeGen/ARM/arm-and-tst-peephole.ll       15
-rw-r--r--  test/CodeGen/ARM/arm-shrink-wrapping.ll         4
-rw-r--r--  test/CodeGen/ARM/call-tc.ll                     2
-rw-r--r--  test/CodeGen/ARM/debug-info-branch-folding.ll   2
-rw-r--r--  test/CodeGen/Thumb/thumb-shrink-wrapping.ll     8
-rw-r--r--  test/CodeGen/Thumb2/float-ops.ll               12
7 files changed, 93 insertions, 21 deletions
diff --git a/test/CodeGen/ARM/and-cmpz.ll b/test/CodeGen/ARM/and-cmpz.ll
new file mode 100644
index 00000000000..809dc6cc6ba
--- /dev/null
+++ b/test/CodeGen/ARM/and-cmpz.ll
@@ -0,0 +1,71 @@
+; RUN: llc -mtriple=thumbv7m-linux-gnu < %s | FileCheck %s --check-prefix=CHECK --check-prefix=T2
+; RUN: llc -mtriple=thumbv6m-linux-gnu < %s | FileCheck %s --check-prefix=CHECK --check-prefix=T1
+
+; CHECK-LABEL: single_bit:
+; CHECK: lsls r0, r0, #23
+; T2-NEXT: mov
+; T2-NEXT: it
+; T1-NEXT: bmi
+define i32 @single_bit(i32 %p) {
+ %a = and i32 %p, 256
+ %b = icmp eq i32 %a, 0
+ br i1 %b, label %true, label %false
+
+true:
+ ret i32 1
+
+false:
+ ret i32 2
+}
+
+; CHECK-LABEL: multi_bit_lsb_ubfx:
+; CHECK: lsls r0, r0, #24
+; T2-NEXT: mov
+; T2-NEXT: it
+; T1-NEXT: beq
+define i32 @multi_bit_lsb_ubfx(i32 %p) {
+ %a = and i32 %p, 255
+ %b = icmp eq i32 %a, 0
+ br i1 %b, label %true, label %false
+
+true:
+ ret i32 1
+
+false:
+ ret i32 2
+}
+
+; CHECK-LABEL: multi_bit_msb:
+; CHECK: lsrs r0, r0, #24
+; T2-NEXT: mov
+; T2-NEXT: it
+; T1-NEXT: beq
+define i32 @multi_bit_msb(i32 %p) {
+ %a = and i32 %p, 4278190080 ; 0xff000000
+ %b = icmp eq i32 %a, 0
+ br i1 %b, label %true, label %false
+
+true:
+ ret i32 1
+
+false:
+ ret i32 2
+}
+
+; CHECK-LABEL: multi_bit_nosb:
+; T1: lsls r0, r0, #8
+; T1-NEXT: lsrs r0, r0, #24
+; T2: tst.w
+; T2-NEXT: it
+; T1-NEXT: beq
+define i32 @multi_bit_nosb(i32 %p) {
+ %a = and i32 %p, 16711680 ; 0x00ff0000
+ %b = icmp eq i32 %a, 0
+ br i1 %b, label %true, label %false
+
+true:
+ ret i32 1
+
+false:
+ ret i32 2
+}
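The four tests above exercise each case from the commit message. As a quick standalone cross-check (illustrative only, not part of the change), recomputing clz/ctz for the masks the tests use matches the shift amounts in the CHECK lines:

#include <cstdint>
#include <cstdio>

int main() {
  // Masks taken from the new and-cmpz.ll tests, with the expected lowering.
  struct { uint32_t Mask; const char *Expected; } Cases[] = {
      {0x00000100u, "single_bit:          lsls #23, then bmi/bpl"},  // clz = 23
      {0x000000FFu, "multi_bit_lsb_ubfx:  lsls #24, then beq"},      // clz = 24
      {0xFF000000u, "multi_bit_msb:       lsrs #24, then beq"},      // ctz = 24
      {0x00FF0000u, "multi_bit_nosb (T1): lsls #8; lsrs #24"},       // clz = 8, clz+ctz = 24
  };
  for (const auto &C : Cases)
    std::printf("0x%08X  clz=%2d ctz=%2d  ->  %s\n", C.Mask,
                __builtin_clz(C.Mask), __builtin_ctz(C.Mask), C.Expected);
  return 0;
}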
diff --git a/test/CodeGen/ARM/arm-and-tst-peephole.ll b/test/CodeGen/ARM/arm-and-tst-peephole.ll
index 04eae8f9afe..c766fe499e4 100644
--- a/test/CodeGen/ARM/arm-and-tst-peephole.ll
+++ b/test/CodeGen/ARM/arm-and-tst-peephole.ll
@@ -28,12 +28,10 @@ tailrecurse: ; preds = %sw.bb, %entry
; ARM: ands {{r[0-9]+}}, {{r[0-9]+}}, #3
; ARM-NEXT: beq
-; THUMB: movs r[[R0:[0-9]+]], #3
-; THUMB-NEXT: ands r[[R0]], r
-; THUMB-NEXT: cmp r[[R0]], #0
+; THUMB: lsls r[[R0:[0-9]+]], r{{.*}}, #30
; THUMB-NEXT: beq
-; T2: ands {{r[0-9]+}}, {{r[0-9]+}}, #3
+; T2: lsls r[[R0:[0-9]+]], r{{.*}}, #30
; T2-NEXT: beq
%and = and i32 %0, 3
@@ -93,7 +91,7 @@ entry:
%1 = load i8, i8* %0, align 1
%2 = zext i8 %1 to i32
; ARM: ands
-; THUMB: ands
+; THUMB: lsls
; T2: ands
; V8: ands
; V8-NEXT: beq
@@ -150,10 +148,9 @@ define i32 @test_tst_assessment(i1 %lhs, i1 %rhs) {
%rhs32 = zext i1 %rhs to i32
%diff = sub nsw i32 %lhs32, %rhs32
; ARM: tst r1, #1
-; THUMB: movs [[RTMP:r[0-9]+]], #1
-; THUMB: tst r1, [[RTMP]]
-; T2: tst.w r1, #1
-; V8: tst.w r1, #1
+; THUMB: lsls r1, r1, #31
+; T2: lsls r1, r1, #31
+; V8: lsls r1, r1, #31
ret i32 %diff
}
diff --git a/test/CodeGen/ARM/arm-shrink-wrapping.ll b/test/CodeGen/ARM/arm-shrink-wrapping.ll
index 4ab090f22b7..4866eeb7ced 100644
--- a/test/CodeGen/ARM/arm-shrink-wrapping.ll
+++ b/test/CodeGen/ARM/arm-shrink-wrapping.ll
@@ -638,12 +638,12 @@ declare double @llvm.pow.f64(double, double)
; during PEI with shrink-wrapping enable.
; CHECK-LABEL: debug_info:
;
-; ENABLE: tst{{(\.w)?}} r2, #1
+; ENABLE: {{tst r2, #1|lsls r1, r2, #31}}
; ENABLE-NEXT: beq [[BB13:LBB[0-9_]+]]
;
; CHECK: push
;
-; DISABLE: tst{{(\.w)?}} r2, #1
+; DISABLE: {{tst r2, #1|lsls r1, r2, #31}}
; DISABLE-NEXT: beq [[BB13:LBB[0-9_]+]]
;
; CHECK: bl{{x?}} _pow
diff --git a/test/CodeGen/ARM/call-tc.ll b/test/CodeGen/ARM/call-tc.ll
index 2277a585336..c5cfb9def33 100644
--- a/test/CodeGen/ARM/call-tc.ll
+++ b/test/CodeGen/ARM/call-tc.ll
@@ -120,7 +120,7 @@ if.end: ; preds = %entry
br i1 %tobool2, label %if.end5, label %if.then3
if.then3: ; preds = %if.end
-; CHECKT2D: bne.w _b
+; CHECKT2D: bmi.w _b
%call4 = tail call i32 @b(i32 %x) nounwind
br label %return
diff --git a/test/CodeGen/ARM/debug-info-branch-folding.ll b/test/CodeGen/ARM/debug-info-branch-folding.ll
index b4e48c4c423..d030f004f6d 100644
--- a/test/CodeGen/ARM/debug-info-branch-folding.ll
+++ b/test/CodeGen/ARM/debug-info-branch-folding.ll
@@ -3,7 +3,7 @@ target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-
target triple = "thumbv7-apple-macosx10.6.7"
;CHECK: vadd.f32 q4, q8, q8
-;CHECK-NEXT: Ltmp1
+;CHECK-NEXT: Ltmp
;CHECK-NEXT: LBB0_1
;CHECK:@DEBUG_VALUE: x <- %Q4{{$}}
diff --git a/test/CodeGen/Thumb/thumb-shrink-wrapping.ll b/test/CodeGen/Thumb/thumb-shrink-wrapping.ll
index 0fa790cd69a..6114b72569e 100644
--- a/test/CodeGen/Thumb/thumb-shrink-wrapping.ll
+++ b/test/CodeGen/Thumb/thumb-shrink-wrapping.ll
@@ -650,11 +650,14 @@ define i1 @beq_to_bx(i32* %y, i32 %head) {
; CHECK: tst r3, r4
; ENABLE-NEXT: pop {r4}
-; ENABLE-NEXT: pop {r3}
-; ENABLE-NEXT: mov lr, r3
+; ENABLE-NEXT: mov r12, r{{.*}}
+; ENABLE-NEXT: pop {r0}
+; ENABLE-NEXT: mov lr, r0
+; ENABLE-NEXT: mov r0, r12
; CHECK-NEXT: beq [[EXIT_LABEL]]
; CHECK: str r1, [r2]
+; CHECK: str r3, [r2]
; CHECK-NEXT: movs r0, #0
; CHECK-NEXT: [[EXIT_LABEL]]: @ %cleanup
; ENABLE-NEXT: bx lr
@@ -675,6 +678,7 @@ if.end:
if.end4:
store i32 %head, i32* %y, align 4
+ store volatile i32 %z, i32* %y, align 4
br label %cleanup
cleanup:
diff --git a/test/CodeGen/Thumb2/float-ops.ll b/test/CodeGen/Thumb2/float-ops.ll
index c9f93f2d613..f4c0ef08e84 100644
--- a/test/CodeGen/Thumb2/float-ops.ll
+++ b/test/CodeGen/Thumb2/float-ops.ll
@@ -259,9 +259,9 @@ define i64 @bitcast_d_to_i(double %a) {
define float @select_f(float %a, float %b, i1 %c) {
; CHECK-LABEL: select_f:
-; NONE: tst.w r2, #1
+; NONE: lsls r2, r2, #31
; NONE: moveq r0, r1
-; HARD: tst.w r0, #1
+; HARD: lsls r0, r0, #31
; VFP4-ALL: vmovne.f32 s1, s0
; VFP4-ALL: vmov.f32 s0, s1
; FP-ARMv8: vseleq.f32 s0, s1, s0
@@ -271,18 +271,18 @@ define float @select_f(float %a, float %b, i1 %c) {
define double @select_d(double %a, double %b, i1 %c) {
; CHECK-LABEL: select_d:
-; NONE: ldr.w [[REG:r[0-9]+]], [sp]
-; NONE: ands [[REG]], [[REG]], #1
+; NONE: ldr{{(.w)?}} [[REG:r[0-9]+]], [sp]
+; NONE: lsls{{(.w)?}} [[REG]], [[REG]], #31
; NONE: moveq r0, r2
; NONE: moveq r1, r3
-; SP: ands r0, r0, #1
+; SP: lsls r0, r0, #31
; SP-DAG: vmov [[ALO:r[0-9]+]], [[AHI:r[0-9]+]], d0
; SP-DAG: vmov [[BLO:r[0-9]+]], [[BHI:r[0-9]+]], d1
; SP: itt ne
; SP-DAG: movne [[BLO]], [[ALO]]
; SP-DAG: movne [[BHI]], [[AHI]]
; SP: vmov d0, [[BLO]], [[BHI]]
-; DP: tst.w r0, #1
+; DP: lsls r0, r0, #31
; VFP4-DP: vmovne.f64 d1, d0
; VFP4-DP: vmov.f64 d0, d1
; FP-ARMV8: vseleq.f64 d0, d1, d0