author    Stanislav Mekhanoshin <Stanislav.Mekhanoshin@amd.com>  2017-02-03 02:20:05 +0000
committer Stanislav Mekhanoshin <Stanislav.Mekhanoshin@amd.com>  2017-02-03 02:20:05 +0000
commit    95787fcd27223dd1394a1fb5ac3ea0a80b12c169 (patch)
tree      8d469bd45900260f71a736c821e25ea4368576a5 /test/Transforms/LoopUnroll
parent    9351d2538cfc1ab64d491b7c15fe757d5a1e3217 (diff)
[AMDGPU] Unroll preferences improvements
Exit the loop analysis early if a suitable private access is found. Do not account for GEPs that are invariant with respect to the loop induction variable. Do not account for allocas that are too big to fit into the register file anyway. Add an option for tuning: -amdgpu-unroll-threshold-private.

Differential Revision: https://reviews.llvm.org/D29473

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@293991 91177308-0d34-0410-b5e6-96231b3b80d8
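The C++ change itself lands in the AMDGPU target's unrolling-preferences hook and is not part of this diff, which only adds the test. As a rough illustration of the heuristic the message describes, here is a minimal C++ sketch in the spirit of LLVM's getUnrollingPreferences; the helper name adjustUnrollingPreferences and the constants UnrollThresholdPrivate and MaxAllocaBytes are illustrative assumptions, not the code actually committed for D29473.

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include <algorithm>

using namespace llvm;

// Stand-ins for the tuning knobs named in the commit message; both
// values are assumptions for illustration only.
static const unsigned UnrollThresholdPrivate = 2000; // -amdgpu-unroll-threshold-private
static const uint64_t MaxAllocaBytes = 4096;         // "too big for the register file"

static void adjustUnrollingPreferences(
    Loop *L, ScalarEvolution &SE,
    TargetTransformInfo::UnrollingPreferences &UP) {
  const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
  for (BasicBlock *BB : L->blocks()) {
    for (Instruction &I : *BB) {
      // Only GEPs into private (scratch) memory are interesting; in this
      // 2017-era IR, private allocas live in address space 0.
      auto *GEP = dyn_cast<GetElementPtrInst>(&I);
      if (!GEP || GEP->getAddressSpace() != 0)
        continue;
      // A GEP that is invariant with respect to the induction variable
      // cannot be turned into distinct scalar slots by unrolling, so it
      // does not justify a higher threshold.
      if (SE.isLoopInvariant(SE.getSCEV(GEP), L))
        continue;
      // Find the underlying alloca and skip ones too large to ever be
      // promoted into the register file.
      auto *Alloca = dyn_cast<AllocaInst>(GEP->getPointerOperand());
      if (!Alloca || !Alloca->isStaticAlloca())
        continue;
      if (DL.getTypeAllocSize(Alloca->getAllocatedType()) > MaxAllocaBytes)
        continue;
      // Suitable private access found: raise the unroll threshold and
      // exit the loop analysis early.
      UP.Threshold = std::max(UP.Threshold, UnrollThresholdPrivate);
      return;
    }
  }
}

A real implementation would also have to look through casts and nested GEPs to find the underlying alloca; the sketch only handles the direct GEP-of-alloca pattern that appears in the test below.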
Diffstat (limited to 'test/Transforms/LoopUnroll')
-rw-r--r-- test/Transforms/LoopUnroll/AMDGPU/unroll-for-private.ll | 120
1 file changed, 120 insertions(+), 0 deletions(-)
diff --git a/test/Transforms/LoopUnroll/AMDGPU/unroll-for-private.ll b/test/Transforms/LoopUnroll/AMDGPU/unroll-for-private.ll
new file mode 100644
index 00000000000..4fdff6691ad
--- /dev/null
+++ b/test/Transforms/LoopUnroll/AMDGPU/unroll-for-private.ll
@@ -0,0 +1,120 @@
+; RUN: opt -mtriple=amdgcn-unknown-amdhsa -loop-unroll -S -amdgpu-unroll-threshold-private=20000 %s | FileCheck %s
+
+; Check that we fully unroll the loop so that the alloca can be eliminated
+; CHECK-LABEL: @non_invariant_ind
+; CHECK: for.body:
+; CHECK-NOT: br
+; CHECK: store i32 %tmp15, i32 addrspace(1)* %arrayidx7, align 4
+; CHECK: ret void
+
+define void @non_invariant_ind(i32 addrspace(1)* nocapture %a, i32 %x) {
+entry:
+ %arr = alloca [64 x i32], align 4
+ %tmp1 = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ %arrayidx5 = getelementptr inbounds [64 x i32], [64 x i32]* %arr, i32 0, i32 %x
+ %tmp15 = load i32, i32* %arrayidx5, align 4
+ %arrayidx7 = getelementptr inbounds i32, i32 addrspace(1)* %a, i32 %tmp1
+ store i32 %tmp15, i32 addrspace(1)* %arrayidx7, align 4
+ ret void
+
+for.body: ; preds = %for.body, %entry
+ %i.015 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %idxprom = sext i32 %i.015 to i64
+ %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %idxprom
+ %tmp16 = load i32, i32 addrspace(1)* %arrayidx, align 4
+ %add = add nsw i32 %i.015, %tmp1
+ %rem = srem i32 %add, 64
+ %arrayidx3 = getelementptr inbounds [64 x i32], [64 x i32]* %arr, i32 0, i32 %rem
+ store i32 %tmp16, i32* %arrayidx3, align 4
+ %inc = add nuw nsw i32 %i.015, 1
+ %exitcond = icmp eq i32 %inc, 100
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
+
+; Check that we unroll the inner loop but not the outer one
+; CHECK-LABEL: @invariant_ind
+; CHECK: %[[exitcond:[^ ]+]] = icmp eq i32 %{{.*}}, 32
+; CHECK: br i1 %[[exitcond]]
+; CHECK-NOT: icmp eq i32 %{{.*}}, 100
+
+define void @invariant_ind(i32 addrspace(1)* nocapture %a, i32 %x) {
+entry:
+ %arr = alloca [64 x i32], align 4
+ %tmp1 = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ br label %for.cond2.preheader
+
+for.cond2.preheader: ; preds = %for.cond.cleanup5, %entry
+ %i.026 = phi i32 [ 0, %entry ], [ %inc10, %for.cond.cleanup5 ]
+ %idxprom = sext i32 %i.026 to i64
+ %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %idxprom
+ %tmp15 = load i32, i32 addrspace(1)* %arrayidx, align 4
+ br label %for.body6
+
+for.cond.cleanup: ; preds = %for.cond.cleanup5
+ %arrayidx13 = getelementptr inbounds [64 x i32], [64 x i32]* %arr, i32 0, i32 %x
+ %tmp16 = load i32, i32* %arrayidx13, align 4
+ %arrayidx15 = getelementptr inbounds i32, i32 addrspace(1)* %a, i32 %tmp1
+ store i32 %tmp16, i32 addrspace(1)* %arrayidx15, align 4
+ ret void
+
+for.cond.cleanup5: ; preds = %for.body6
+ %inc10 = add nuw nsw i32 %i.026, 1
+ %exitcond27 = icmp eq i32 %inc10, 32
+ br i1 %exitcond27, label %for.cond.cleanup, label %for.cond2.preheader
+
+for.body6: ; preds = %for.body6, %for.cond2.preheader
+ %j.025 = phi i32 [ 0, %for.cond2.preheader ], [ %inc, %for.body6 ]
+ %add = add nsw i32 %j.025, %tmp1
+ %rem = srem i32 %add, 64
+ %arrayidx8 = getelementptr inbounds [64 x i32], [64 x i32]* %arr, i32 0, i32 %rem
+ store i32 %tmp15, i32* %arrayidx8, align 4
+ %inc = add nuw nsw i32 %j.025, 1
+ %exitcond = icmp eq i32 %inc, 100
+ br i1 %exitcond, label %for.cond.cleanup5, label %for.body6
+}
+
+; Check that we do not force unrolling if the alloca is too big
+; CHECK-LABEL: @too_big
+; CHECK: for.body:
+; CHECK: icmp eq i32 %{{.*}}, 100
+; CHECK: br
+
+define void @too_big(i32 addrspace(1)* nocapture %a, i32 %x) {
+entry:
+ %arr = alloca [256 x i32], align 4
+ %tmp1 = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ %arrayidx5 = getelementptr inbounds [256 x i32], [256 x i32]* %arr, i32 0, i32 %x
+ %tmp15 = load i32, i32* %arrayidx5, align 4
+ %arrayidx7 = getelementptr inbounds i32, i32 addrspace(1)* %a, i32 %tmp1
+ store i32 %tmp15, i32 addrspace(1)* %arrayidx7, align 4
+ ret void
+
+for.body: ; preds = %for.body, %entry
+ %i.015 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %idxprom = sext i32 %i.015 to i64
+ %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %idxprom
+ %tmp16 = load i32, i32 addrspace(1)* %arrayidx, align 4
+ %add = add nsw i32 %i.015, %tmp1
+ %rem = srem i32 %add, 64
+ %arrayidx3 = getelementptr inbounds [256 x i32], [256 x i32]* %arr, i32 0, i32 %rem
+ store i32 %tmp16, i32* %arrayidx3, align 4
+ %inc = add nuw nsw i32 %i.015, 1
+ %exitcond = icmp eq i32 %inc, 100
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
+
+declare i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr() #1
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+declare i32 @llvm.amdgcn.workgroup.id.x() #1
+
+declare i8 addrspace(2)* @llvm.amdgcn.implicitarg.ptr() #1
+
+attributes #1 = { nounwind readnone }
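
For reference, when run under lit the RUN line at the top of the file expands to roughly the following pipeline (hypothetical paths, assuming a built opt and FileCheck on PATH; lit substitutes %s with the test file's path):

opt -mtriple=amdgcn-unknown-amdhsa -loop-unroll -S \
    -amdgpu-unroll-threshold-private=20000 \
    test/Transforms/LoopUnroll/AMDGPU/unroll-for-private.ll \
  | FileCheck test/Transforms/LoopUnroll/AMDGPU/unroll-for-private.ll

Raising -amdgpu-unroll-threshold-private well above the default makes the first two loops cheap enough to unroll fully, while the 256-element alloca in @too_big exceeds the size cutoff, so its loop does not receive the boosted threshold and stays rolled.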