author    Matthias Braun <matze@braunis.de>  2018-01-10 18:16:24 +0000
committer Matthias Braun <matze@braunis.de>  2018-01-10 18:16:24 +0000
commit    11adaf9955a9580030f8cb1aaf4cbd384b86af8a (patch)
tree      eccd0a63f6df75783d9b5c2961ec63ba2d26fb92
parent    1f2edf66a2a27193860aa29b80f3d54666d4877a (diff)
AArch64: Fix emergency spillslot being out of reach for large callframes
Large callframes (calls with several hundreds or thousands of parameters)
could lead to situations in which the emergency spillslot is out of range to
be addressed relative to the stack pointer. This commit forces the use of a
frame pointer in the presence of large callframes.

This commit does several things:
- Compute max callframe size at the end of instruction selection.
- Add mirFileLoaded target callback. Use it to compute the max callframe size
  after loading a .mir file when the size wasn't specified in the file.
- Let TargetFrameLowering::hasFP() return true if there exists a callframe
  > 255 bytes.
- Always place the emergency spillslot close to FP if we have a frame pointer.
- Note that `useFPForScavengingIndex()` would previously return false when a
  base pointer was available, leading to the emergency spillslot getting
  allocated late (that's the whole effect of this callback). That made no
  sense to me, so I took this case out: even though the emergency spillslot
  is technically not referenced by FP in this case, we still want it
  allocated early.

Differential Revision: https://reviews.llvm.org/D40876

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@322200 91177308-0d34-0410-b5e6-96231b3b80d8
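Aside: the 255-byte figure matches the signed 9-bit immediate of AArch64's
unscaled load/store forms (LDUR/STUR). A minimal C++ sketch of that encoding
constraint, with a hypothetical helper name:

    #include <cstdint>

    // Unscaled AArch64 load/store immediates are 9-bit signed byte offsets,
    // so [-256, 255] is the widest SP-relative range every GP-register
    // spill/reload can encode without a scratch register.
    static bool fitsUnscaledImm9(int64_t ByteOffset) {
      return ByteOffset >= -256 && ByteOffset <= 255;
    }

With a multi-kilobyte outgoing-argument area sitting between SP and the
emergency spillslot, that range is easily exceeded, and materializing the
address would itself need a scratch register - the very thing the register
scavenger may not have at that point. Anchoring the slot to FP keeps the
offset small.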
-rw-r--r--  include/llvm/CodeGen/TargetSubtargetInfo.h   |  3
-rw-r--r--  lib/CodeGen/MIRParser/MIRParser.cpp          |  2
-rw-r--r--  lib/CodeGen/TargetSubtargetInfo.cpp          |  3
-rw-r--r--  lib/Target/AArch64/AArch64FrameLowering.cpp  | 32
-rw-r--r--  lib/Target/AArch64/AArch64ISelLowering.cpp   |  5
-rw-r--r--  lib/Target/AArch64/AArch64ISelLowering.h     |  2
-rw-r--r--  lib/Target/AArch64/AArch64RegisterInfo.cpp   | 12
-rw-r--r--  lib/Target/AArch64/AArch64Subtarget.cpp      | 10
-rw-r--r--  lib/Target/AArch64/AArch64Subtarget.h        |  2
-rw-r--r--  test/CodeGen/AArch64/big-callframe.ll        | 15
10 files changed, 75 insertions(+), 11 deletions(-)
diff --git a/include/llvm/CodeGen/TargetSubtargetInfo.h b/include/llvm/CodeGen/TargetSubtargetInfo.h
index 576522aef46..d5b413c5342 100644
--- a/include/llvm/CodeGen/TargetSubtargetInfo.h
+++ b/include/llvm/CodeGen/TargetSubtargetInfo.h
@@ -248,6 +248,9 @@ public:
/// Returns string representation of scheduler comment
std::string getSchedInfoStr(const MachineInstr &MI) const override;
std::string getSchedInfoStr(MCInst const &MCI) const override;
+
+ /// This is called after a .mir file was loaded.
+ virtual void mirFileLoaded(MachineFunction &MF) const;
};
} // end namespace llvm
diff --git a/lib/CodeGen/MIRParser/MIRParser.cpp b/lib/CodeGen/MIRParser/MIRParser.cpp
index 7d8e62736a3..e4e3fbbd75d 100644
--- a/lib/CodeGen/MIRParser/MIRParser.cpp
+++ b/lib/CodeGen/MIRParser/MIRParser.cpp
@@ -417,6 +417,8 @@ MIRParserImpl::initializeMachineFunction(const yaml::MachineFunction &YamlMF,
computeFunctionProperties(MF);
+ MF.getSubtarget().mirFileLoaded(MF);
+
MF.verify();
return false;
}
diff --git a/lib/CodeGen/TargetSubtargetInfo.cpp b/lib/CodeGen/TargetSubtargetInfo.cpp
index 1a317cd865f..7aab869f025 100644
--- a/lib/CodeGen/TargetSubtargetInfo.cpp
+++ b/lib/CodeGen/TargetSubtargetInfo.cpp
@@ -111,3 +111,6 @@ std::string TargetSubtargetInfo::getSchedInfoStr(MCInst const &MCI) const {
TSchedModel.computeInstrRThroughput(MCI.getOpcode());
return createSchedInfoStr(Latency, RThroughput);
}
+
+void TargetSubtargetInfo::mirFileLoaded(MachineFunction &MF) const {
+}
diff --git a/lib/Target/AArch64/AArch64FrameLowering.cpp b/lib/Target/AArch64/AArch64FrameLowering.cpp
index d66f7b59a4b..ea4bfe7e8d9 100644
--- a/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -142,6 +142,12 @@ static cl::opt<bool> EnableRedZone("aarch64-redzone",
STATISTIC(NumRedZoneFunctions, "Number of functions using red zone");
+/// This is the biggest offset to the stack pointer we can encode in aarch64
+/// instructions (without using a separate calculation and a temp register).
+/// Note that the exceptions here are vector stores/loads, which cannot encode any
+/// displacements (see estimateRSStackSizeLimit(), isAArch64FrameOffsetLegal()).
+static const unsigned DefaultSafeSPDisplacement = 255;
+
/// Look at each instruction that references stack frames and return the stack
/// size limit beyond which some of these instructions will require a scratch
/// register during their expansion later.
@@ -167,7 +173,7 @@ static unsigned estimateRSStackSizeLimit(MachineFunction &MF) {
}
}
}
- return 255;
+ return DefaultSafeSPDisplacement;
}
bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const {
@@ -191,11 +197,25 @@ bool AArch64FrameLowering::hasFP(const MachineFunction &MF) const {
const MachineFrameInfo &MFI = MF.getFrameInfo();
const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
// Retain behavior of always omitting the FP for leaf functions when possible.
- return (MFI.hasCalls() &&
- MF.getTarget().Options.DisableFramePointerElim(MF)) ||
- MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() ||
- MFI.hasStackMap() || MFI.hasPatchPoint() ||
- RegInfo->needsStackRealignment(MF);
+ if (MFI.hasCalls() && MF.getTarget().Options.DisableFramePointerElim(MF))
+ return true;
+ if (MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() ||
+ MFI.hasStackMap() || MFI.hasPatchPoint() ||
+ RegInfo->needsStackRealignment(MF))
+ return true;
+ // With large callframes around we may need to use FP to access the scavenging
+ // emergency spillslot.
+ //
+ // Unfortunately some calls to hasFP(), like machine verifier ->
+ // getReservedReg() -> hasFP() in the middle of global isel, happen too early
+ // to know the max call frame size. Hopefully conservatively returning "true"
+ // in those cases is fine.
+ // DefaultSafeSPDisplacement is fine as we only emergency spill GP regs.
+ if (!MFI.isMaxCallFrameSizeComputed() ||
+ MFI.getMaxCallFrameSize() > DefaultSafeSPDisplacement)
+ return true;
+
+ return false;
}
/// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
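Aside: a toy recreation of the clause added above, to make the numbers in the
test at the end of this patch concrete (the function name here is made up,
not LLVM API):

    #include <cstdint>

    constexpr uint64_t DefaultSafeSPDisplacement = 255;

    // Force a frame pointer when the max call frame size is still unknown
    // (a too-early query) or too large to address SP-relative.
    static bool needsFPForCallFrame(bool SizeComputed,
                                    uint64_t MaxCallFrameSize) {
      return !SizeComputed || MaxCallFrameSize > DefaultSafeSPDisplacement;
    }

    // big-callframe.ll passes a byval [4096 x i64], i.e. 4096 * 8 = 32768
    // bytes of outgoing arguments: needsFPForCallFrame(true, 32768) is true.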
diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index 41ed24c329e..c810fd777bd 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -10974,3 +10974,8 @@ AArch64TargetLowering::getVaListSizeInBits(const DataLayout &DL) const {
return 3 * getPointerTy(DL).getSizeInBits() + 2 * 32;
}
+
+void AArch64TargetLowering::finalizeLowering(MachineFunction &MF) const {
+ MF.getFrameInfo().computeMaxCallFrameSize(MF);
+ TargetLoweringBase::finalizeLowering(MF);
+}
diff --git a/lib/Target/AArch64/AArch64ISelLowering.h b/lib/Target/AArch64/AArch64ISelLowering.h
index 8d78b5b6b5b..6a89f23cdc8 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/lib/Target/AArch64/AArch64ISelLowering.h
@@ -647,6 +647,8 @@ private:
SelectionDAG &DAG) const override;
bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;
+
+ void finalizeLowering(MachineFunction &MF) const override;
};
namespace AArch64 {
diff --git a/lib/Target/AArch64/AArch64RegisterInfo.cpp b/lib/Target/AArch64/AArch64RegisterInfo.cpp
index 88dd297e007..360b39125b7 100644
--- a/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -225,11 +225,13 @@ bool AArch64RegisterInfo::requiresVirtualBaseRegisters(
bool
AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
- const MachineFrameInfo &MFI = MF.getFrameInfo();
- // AArch64FrameLowering::resolveFrameIndexReference() can always fall back
- // to the stack pointer, so only put the emergency spill slot next to the
- // FP when there's no better way to access it (SP or base pointer).
- return MFI.hasVarSizedObjects() && !hasBasePointer(MF);
+ // This function indicates whether the emergency spillslot should be placed
+ // close to the beginning of the stackframe (closer to FP) or the end
+ // (closer to SP).
+ //
+ // The beginning works most reliably if we have a frame pointer.
+ const AArch64FrameLowering &TFI = *getFrameLowering(MF);
+ return TFI.hasFP(MF);
}
bool AArch64RegisterInfo::requiresFrameIndexScavenging(
diff --git a/lib/Target/AArch64/AArch64Subtarget.cpp b/lib/Target/AArch64/AArch64Subtarget.cpp
index 688bb936d0c..eb9bb1498d6 100644
--- a/lib/Target/AArch64/AArch64Subtarget.cpp
+++ b/lib/Target/AArch64/AArch64Subtarget.cpp
@@ -250,3 +250,13 @@ std::unique_ptr<PBQPRAConstraint>
AArch64Subtarget::getCustomPBQPConstraints() const {
return balanceFPOps() ? llvm::make_unique<A57ChainingConstraint>() : nullptr;
}
+
+void AArch64Subtarget::mirFileLoaded(MachineFunction &MF) const {
+ // We usually compute max call frame size after ISel. Do the computation now
+ // if the .mir file didn't specify it. Note that this will probably give you
+ // bogus values after PEI has eliminated the callframe setup/destroy pseudo
+ // instructions; specify it explicitly if you need it to be correct.
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ if (!MFI.isMaxCallFrameSizeComputed())
+ MFI.computeMaxCallFrameSize(MF);
+}
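Aside: maxCallFrameSize is an ordinary key in the MIR frameInfo block, so a
.mir test that needs a stable value after PEI can pin it instead of relying
on the recomputation above. A minimal sketch (function name and value are
illustrative):

    name:            func
    frameInfo:
      maxCallFrameSize: 32768  # explicit; if omitted, mirFileLoaded()
                               # recomputes it from the callframe pseudos
    body: |
      ...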
diff --git a/lib/Target/AArch64/AArch64Subtarget.h b/lib/Target/AArch64/AArch64Subtarget.h
index 9245b2f396b..45a8eb16464 100644
--- a/lib/Target/AArch64/AArch64Subtarget.h
+++ b/lib/Target/AArch64/AArch64Subtarget.h
@@ -326,6 +326,8 @@ public:
return false;
}
}
+
+ void mirFileLoaded(MachineFunction &MF) const override;
};
} // End llvm namespace
diff --git a/test/CodeGen/AArch64/big-callframe.ll b/test/CodeGen/AArch64/big-callframe.ll
new file mode 100644
index 00000000000..6c84e7f550e
--- /dev/null
+++ b/test/CodeGen/AArch64/big-callframe.ll
@@ -0,0 +1,15 @@
+; RUN: llc -o - %s | FileCheck %s
+; Make sure we use a frame pointer and fp relative addressing for the emergency
+; spillslot when we have gigantic callframes.
+; CHECK-LABEL: func:
+; CHECK: stur {{.*}}, [x29, #{{.*}}] // 8-byte Folded Spill
+; CHECK: ldur {{.*}}, [x29, #{{.*}}] // 8-byte Folded Reload
+target triple = "aarch64--"
+declare void @extfunc([4096 x i64]* byval %p)
+define void @func([4096 x i64]* %z) {
+ %lvar = alloca [31 x i8]
+ %v = load volatile [31 x i8], [31 x i8]* %lvar
+ store volatile [31 x i8] %v, [31 x i8]* %lvar
+ call void @extfunc([4096 x i64]* byval %z)
+ ret void
+}