author    Hans Wennborg <hans@hanshq.net>    2018-02-02 13:35:26 +0000
committer Hans Wennborg <hans@hanshq.net>    2018-02-02 13:35:26 +0000
commit    dcecdaaa04cb66db2d25a1c10725adae143c5768 (patch)
tree      9f7e4a16646ad5c2654340175273b255816010de
parent    8f86cd921367fe06729ef3147453e4c5cd902cc6 (diff)
Merging r323857:
------------------------------------------------------------------------
r323857 | rogfer01 | 2018-01-31 10:23:43 +0100 (Wed, 31 Jan 2018) | 19 lines

[ARM] Allow the scheduler to clone a node with glue to avoid a copy CPSR ↔ GPR.

In Thumb1, with the new ADDCARRY / SUBCARRY, the scheduler may need to copy
CPSR ↔ GPR, but not all Thumb1 targets implement such copies.

The scheduler can attempt to clone the instructions before resorting to a
copy, but it currently does not do so for nodes with input glue.

This patch introduces a target hook that lets the target decide whether a
glued machine node is still eligible for copying. In this case these are
ARM::tADCS and ARM::tSBCS.

As a follow-up, we should actually implement the copies for the Thumb1
targets that do support them and restrict the hook to the targets that
really cannot do such a copy, as these clones are not ideal.

This change fixes PR35836.

Differential Revision: https://reviews.llvm.org/D42051
------------------------------------------------------------------------

git-svn-id: https://llvm.org/svn/llvm-project/llvm/branches/release_60@324082 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--   include/llvm/CodeGen/TargetInstrInfo.h          |  4
-rw-r--r--   lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp   | 20
-rw-r--r--   lib/Target/ARM/Thumb1InstrInfo.cpp               | 13
-rw-r--r--   lib/Target/ARM/Thumb1InstrInfo.h                 |  1
-rw-r--r--   test/CodeGen/Thumb/pr35836.ll                    | 56
-rw-r--r--   test/CodeGen/Thumb/pr35836_2.ll                  | 57
6 files changed, 147 insertions, 4 deletions
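
Background for the diff that follows: a target opts in by overriding the new
TargetInstrInfo::canCopyGluedNodeDuringSchedule hook for the opcodes it wants the
scheduler to clone instead of copying through the status register. Below is a minimal
sketch of such an override; MyTargetInstrInfo and the MyTarget::ADDSrr / MyTarget::SUBSrr
opcodes are hypothetical placeholders used only to illustrate the pattern, not part of
this commit (the real Thumb1 implementation is in the diff itself):

// Hypothetical sketch, not part of this commit: a target whose flag-setting
// add/sub cannot be copied between its status register and GPRs tells the
// scheduler that cloning such glued nodes is allowed.
#include "llvm/CodeGen/SelectionDAGNodes.h" // SDNode
#include "MyTargetInstrInfo.h"              // hypothetical target header

bool MyTargetInstrInfo::canCopyGluedNodeDuringSchedule(SDNode *N) const {
  // Only machine nodes carry a target opcode; stay conservative otherwise.
  if (!N->isMachineOpcode())
    return false;
  switch (N->getMachineOpcode()) {
  case MyTarget::ADDSrr: // hypothetical flag-setting add
  case MyTarget::SUBSrr: // hypothetical flag-setting subtract
    return true;         // cloning is cheaper than a status-register copy
  default:
    return false;
  }
}

With the ScheduleDAGRRList change below, CopyAndMoveSuccessors consults this hook before
rejecting a node with incoming glue or glue operands, so such targets get a clone rather
than a forced CPSR ↔ GPR cross-copy.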
diff --git a/include/llvm/CodeGen/TargetInstrInfo.h b/include/llvm/CodeGen/TargetInstrInfo.h
index 38a1b33aeca..6b5404be35d 100644
--- a/include/llvm/CodeGen/TargetInstrInfo.h
+++ b/include/llvm/CodeGen/TargetInstrInfo.h
@@ -950,6 +950,10 @@ public:
   /// Return true when a target supports MachineCombiner.
   virtual bool useMachineCombiner() const { return false; }
 
+  /// Return true if the given SDNode can be copied during scheduling
+  /// even if it has glue.
+  virtual bool canCopyGluedNodeDuringSchedule(SDNode *N) const { return false; }
+
 protected:
   /// Target-dependent implementation for foldMemoryOperand.
   /// Target-independent code in foldMemoryOperand will
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
index 49f304c8cc8..82337d43c5c 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
@@ -1117,22 +1117,34 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
   if (!N)
     return nullptr;
 
-  if (SU->getNode()->getGluedNode())
+  DEBUG(dbgs() << "Considering duplicating the SU\n");
+  DEBUG(SU->dump(this));
+
+  if (N->getGluedNode() &&
+      !TII->canCopyGluedNodeDuringSchedule(N)) {
+    DEBUG(dbgs()
+          << "Giving up because it has incoming glue and the target does not "
+             "want to copy it\n");
     return nullptr;
+  }
 
   SUnit *NewSU;
   bool TryUnfold = false;
   for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
     MVT VT = N->getSimpleValueType(i);
-    if (VT == MVT::Glue)
+    if (VT == MVT::Glue) {
+      DEBUG(dbgs() << "Giving up because it has outgoing glue\n");
       return nullptr;
-    else if (VT == MVT::Other)
+    } else if (VT == MVT::Other)
       TryUnfold = true;
   }
   for (const SDValue &Op : N->op_values()) {
     MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());
-    if (VT == MVT::Glue)
+    if (VT == MVT::Glue && !TII->canCopyGluedNodeDuringSchedule(N)) {
+      DEBUG(dbgs() << "Giving up because one of the operands is glue and "
+                      "the target does not want to copy it\n");
       return nullptr;
+    }
   }
 
   // If possible unfold instruction.
diff --git a/lib/Target/ARM/Thumb1InstrInfo.cpp b/lib/Target/ARM/Thumb1InstrInfo.cpp
index 49645834e2d..05c98aab6f2 100644
--- a/lib/Target/ARM/Thumb1InstrInfo.cpp
+++ b/lib/Target/ARM/Thumb1InstrInfo.cpp
@@ -141,3 +141,16 @@ void Thumb1InstrInfo::expandLoadStackGuard(
   else
     expandLoadStackGuardBase(MI, ARM::tLDRLIT_ga_abs, ARM::tLDRi);
 }
+
+bool Thumb1InstrInfo::canCopyGluedNodeDuringSchedule(SDNode *N) const {
+  // In Thumb1 the scheduler may need to schedule a cross-copy between GPRs and
+  // CPSR, but that is not always possible there, so allow the scheduler to
+  // clone tADCS and tSBCS even if they have glue.
+  // FIXME: Actually implement the cross-copy where it is possible (post v6),
+  // because these clones entail more spilling and are not ideal.
+  unsigned Opcode = N->getMachineOpcode();
+  if (Opcode == ARM::tADCS || Opcode == ARM::tSBCS)
+    return true;
+
+  return false;
+}
diff --git a/lib/Target/ARM/Thumb1InstrInfo.h b/lib/Target/ARM/Thumb1InstrInfo.h
index e8d9a9c4ff1..9f04a3ed262 100644
--- a/lib/Target/ARM/Thumb1InstrInfo.h
+++ b/lib/Target/ARM/Thumb1InstrInfo.h
@@ -53,6 +53,7 @@ public:
                             const TargetRegisterClass *RC,
                             const TargetRegisterInfo *TRI) const override;
 
+  bool canCopyGluedNodeDuringSchedule(SDNode *N) const override;
 private:
   void expandLoadStackGuard(MachineBasicBlock::iterator MI) const override;
 };
diff --git a/test/CodeGen/Thumb/pr35836.ll b/test/CodeGen/Thumb/pr35836.ll
new file mode 100644
index 00000000000..7765e66658a
--- /dev/null
+++ b/test/CodeGen/Thumb/pr35836.ll
@@ -0,0 +1,56 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "thumbv5e-none-linux-gnueabi"
+
+; Function Attrs: norecurse nounwind optsize
+define void @f(i32,i32,i32,i32,i32* %x4p, i32* %x5p, i32* %x6p) {
+if.end:
+ br label %while.body
+
+while.body:
+ %ll.0100 = phi i64 [ 0, %if.end ], [ %shr32, %while.body ]
+ %add = add nuw nsw i64 %ll.0100, 0
+ %add3 = add nuw nsw i64 %add, 0
+ %shr = lshr i64 %add3, 32
+ %conv7 = zext i32 %0 to i64
+ %conv9 = zext i32 %1 to i64
+ %add10 = add nuw nsw i64 %conv9, %conv7
+ %add11 = add nuw nsw i64 %add10, %shr
+ %shr14 = lshr i64 %add11, 32
+ %conv16 = zext i32 %2 to i64
+ %conv18 = zext i32 %3 to i64
+ %add19 = add nuw nsw i64 %conv18, %conv16
+ %add20 = add nuw nsw i64 %add19, %shr14
+ %conv21 = trunc i64 %add20 to i32
+ store i32 %conv21, i32* %x6p, align 4
+ %shr23 = lshr i64 %add20, 32
+ %x4 = load i32, i32* %x4p, align 4
+ %conv25 = zext i32 %x4 to i64
+ %x5 = load i32, i32* %x5p, align 4
+ %conv27 = zext i32 %x5 to i64
+ %add28 = add nuw nsw i64 %conv27, %conv25
+ %add29 = add nuw nsw i64 %add28, %shr23
+ %shr32 = lshr i64 %add29, 32
+ br label %while.body
+}
+; CHECK: adds r3, r0, r1
+; CHECK: push {r5}
+; CHECK: pop {r1}
+; CHECK: adcs r1, r1
+; CHECK: ldr r0, [sp, #12] @ 4-byte Reload
+; CHECK: ldr r2, [sp, #8] @ 4-byte Reload
+; CHECK: adds r2, r0, r2
+; CHECK: push {r5}
+; CHECK: pop {r4}
+; CHECK: adcs r4, r4
+; CHECK: adds r0, r2, r5
+; CHECK: push {r3}
+; CHECK: pop {r0}
+; CHECK: adcs r0, r4
+; CHECK: ldr r6, [sp, #4] @ 4-byte Reload
+; CHECK: str r0, [r6]
+; CHECK: ldr r0, [r7]
+; CHECK: ldr r6, [sp] @ 4-byte Reload
+; CHECK: ldr r6, [r6]
+; CHECK: adds r0, r6, r0
diff --git a/test/CodeGen/Thumb/pr35836_2.ll b/test/CodeGen/Thumb/pr35836_2.ll
new file mode 100644
index 00000000000..af115e8ce21
--- /dev/null
+++ b/test/CodeGen/Thumb/pr35836_2.ll
@@ -0,0 +1,57 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:e-p:64:64-i128:64-v128:64:128-a:0:64-n64-S64"
+target triple = "thumbv6---gnueabi"
+
+; Function Attrs: norecurse nounwind readonly
+define i128 @a(i64* nocapture readonly %z) local_unnamed_addr #0 {
+entry:
+ %0 = load i64, i64* %z, align 4
+ %conv.i = zext i64 %0 to i128
+ %arrayidx1 = getelementptr inbounds i64, i64* %z, i64 2
+ %1 = load i64, i64* %arrayidx1, align 4
+ %conv.i38 = zext i64 %1 to i128
+ %shl.i39 = shl nuw i128 %conv.i38, 64
+ %or = or i128 %shl.i39, %conv.i
+ %arrayidx3 = getelementptr inbounds i64, i64* %z, i64 1
+ %2 = load i64, i64* %arrayidx3, align 4
+ %conv.i37 = zext i64 %2 to i128
+ %arrayidx5 = getelementptr inbounds i64, i64* %z, i64 3
+ %3 = load i64, i64* %arrayidx5, align 4
+ %conv.i35 = zext i64 %3 to i128
+ %shl.i36 = shl nuw i128 %conv.i35, 64
+ %or7 = or i128 %shl.i36, %conv.i37
+ %arrayidx10 = getelementptr inbounds i64, i64* %z, i64 4
+ %4 = load i64, i64* %arrayidx10, align 4
+ %conv.i64 = zext i64 %4 to i128
+ %shl.i33 = shl nuw i128 %conv.i64, 64
+ %or12 = or i128 %shl.i33, %conv.i
+ %arrayidx15 = getelementptr inbounds i64, i64* %z, i64 5
+ %5 = load i64, i64* %arrayidx15, align 4
+ %conv.i30 = zext i64 %5 to i128
+ %shl.i = shl nuw i128 %conv.i30, 64
+ %or17 = or i128 %shl.i, %conv.i37
+ %add = add i128 %or7, %or
+ %add18 = add i128 %or17, %or12
+ %mul = mul i128 %add18, %add
+ ret i128 %mul
+}
+; CHECK: adds r4, r2, r7
+; CHECK: mov r4, r1
+; CHECK: adcs r4, r6
+; CHECK: ldr r4, [sp, #20] @ 4-byte Reload
+; CHECK: adcs r5, r4
+; CHECK: ldr r4, [sp, #24] @ 4-byte Reload
+; CHECK: adcs r3, r4
+; CHECK: adds r4, r2, r7
+; CHECK: adcs r1, r6
+; CHECK: mov r2, sp
+; CHECK: str r4, [r2]
+; CHECK: str r1, [r2, #4]
+; CHECK: ldr r6, [r0, #16]
+; CHECK: ldr r7, [r0, #24]
+; CHECK: adcs r7, r6
+; CHECK: str r7, [r2, #8]
+; CHECK: ldr r6, [r0, #20]
+; CHECK: ldr r0, [r0, #28]
+; CHECK: adcs r0, r6