diff options
author | Sanjoy Das <sanjoy@playingwithpointers.com> | 2017-12-04 19:21:58 +0000 |
---|---|---|
committer | Sanjoy Das <sanjoy@playingwithpointers.com> | 2017-12-04 19:21:58 +0000 |
commit | 3745f0675ab507814af923afc22739a976488782 (patch) | |
tree | cd39bfdb77c47eeb2535d0ba5002f840d16b8175 /test | |
parent | 998cdc4f56d93f9f15bab071021482eeea17647f (diff) |
[BypassSlowDivision] Improve our handling of divisions by constants
(This reapplies r314253. r314253 was reverted on r314482 because of a
correctness regression on P100, but that regression was identified to be
something else.)
Summary:
Don't bail out on constant divisors for divisions that can be narrowed without
introducing control flow. This gives us a 32-bit multiply instead of an
emulated 64-bit multiply in the generated PTX assembly.
Reviewers: jlebar
Subscribers: jholewinski, mcrosier, llvm-commits
Differential Revision: https://reviews.llvm.org/D38265
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@319677 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test')
-rw-r--r-- | test/Transforms/CodeGenPrepare/NVPTX/bypass-slow-div.ll | 77 |
1 files changed, 77 insertions, 0 deletions
diff --git a/test/Transforms/CodeGenPrepare/NVPTX/bypass-slow-div.ll b/test/Transforms/CodeGenPrepare/NVPTX/bypass-slow-div.ll index 4846d52f4d2..4d824e450ff 100644 --- a/test/Transforms/CodeGenPrepare/NVPTX/bypass-slow-div.ll +++ b/test/Transforms/CodeGenPrepare/NVPTX/bypass-slow-div.ll @@ -27,3 +27,80 @@ define void @rem_only(i64 %a, i64 %b, i64* %retptr) { store i64 %d, i64* %retptr ret void } + +; CHECK-LABEL: @udiv_by_constant( +define i64 @udiv_by_constant(i32 %a) { +; CHECK-NEXT: [[A_ZEXT:%.*]] = zext i32 [[A:%.*]] to i64 +; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[A_ZEXT]] to i32 +; CHECK-NEXT: [[TMP2:%.*]] = udiv i32 [[TMP1]], 50 +; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 +; CHECK-NEXT: ret i64 [[TMP3]] + + %a.zext = zext i32 %a to i64 + %wide.div = udiv i64 %a.zext, 50 + ret i64 %wide.div +} + +; CHECK-LABEL: @urem_by_constant( +define i64 @urem_by_constant(i32 %a) { +; CHECK-NEXT: [[A_ZEXT:%.*]] = zext i32 [[A:%.*]] to i64 +; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[A_ZEXT]] to i32 +; CHECK-NEXT: [[TMP2:%.*]] = urem i32 [[TMP1]], 50 +; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 +; CHECK-NEXT: ret i64 [[TMP3]] + + %a.zext = zext i32 %a to i64 + %wide.div = urem i64 %a.zext, 50 + ret i64 %wide.div +} + +; Negative test: instead of emitting a runtime check on %a, we prefer to let the +; DAGCombiner transform this division by constant into a multiplication (with a +; "magic constant"). +; +; CHECK-LABEL: @udiv_by_constant_negative_0( +define i64 @udiv_by_constant_negative_0(i64 %a) { +; CHECK-NEXT: [[WIDE_DIV:%.*]] = udiv i64 [[A:%.*]], 50 +; CHECK-NEXT: ret i64 [[WIDE_DIV]] + + %wide.div = udiv i64 %a, 50 + ret i64 %wide.div +} + +; Negative test: while we know the dividend is short, the divisor isn't. This +; test is here for completeness, but instcombine will optimize this to return 0. 
+; +; CHECK-LABEL: @udiv_by_constant_negative_1( +define i64 @udiv_by_constant_negative_1(i32 %a) { +; CHECK-NEXT: [[A_ZEXT:%.*]] = zext i32 [[A:%.*]] to i64 +; CHECK-NEXT: [[WIDE_DIV:%.*]] = udiv i64 [[A_ZEXT]], 8589934592 +; CHECK-NEXT: ret i64 [[WIDE_DIV]] + + %a.zext = zext i32 %a to i64 + %wide.div = udiv i64 %a.zext, 8589934592 ;; == 1 << 33 + ret i64 %wide.div +} + +; URem version of udiv_by_constant_negative_0 +; +; CHECK-LABEL: @urem_by_constant_negative_0( +define i64 @urem_by_constant_negative_0(i64 %a) { +; CHECK-NEXT: [[WIDE_DIV:%.*]] = urem i64 [[A:%.*]], 50 +; CHECK-NEXT: ret i64 [[WIDE_DIV]] + + %wide.div = urem i64 %a, 50 + ret i64 %wide.div +} + +; URem version of udiv_by_constant_negative_1 +; +; CHECK-LABEL: @urem_by_constant_negative_1( +define i64 @urem_by_constant_negative_1(i32 %a) { +; CHECK-NEXT: [[A_ZEXT:%.*]] = zext i32 [[A:%.*]] to i64 +; CHECK-NEXT: [[WIDE_DIV:%.*]] = urem i64 [[A_ZEXT]], 8589934592 +; CHECK-NEXT: ret i64 [[WIDE_DIV]] + + %a.zext = zext i32 %a to i64 + %wide.div = urem i64 %a.zext, 8589934592 ;; == 1 << 33 + ret i64 %wide.div +} |