author    Matthias Braun <matze@braunis.de>  2017-12-15 22:22:58 +0000
committer Matthias Braun <matze@braunis.de>  2017-12-15 22:22:58 +0000
commit    d318139827695f2011ef24693a101829829558b7 (patch)
tree      e419a9891762635d61c1db320b72411e7b330ce8 /lib/Target/AArch64
parent    dfcb4f534480ecf3bc64c11781fa2d3123737e91 (diff)
MachineFunction: Return reference from getFunction(); NFC
The Function can never be nullptr so we can return a reference.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@320884 91177308-0d34-0410-b5e6-96231b3b80d8
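Concretely, the signature change amounts to the following sketch (illustrative only; the member name is assumed, and this is not the verbatim MachineFunction.h declaration):

    class MachineFunction {
      const Function &F; // assumed member name; a MachineFunction is always
                         // constructed for a concrete IR Function, so this
                         // can never be null
    public:
      // Before this commit, callers got a pointer that was guaranteed
      // non-null anyway and had to dereference it:
      //   const Function *getFunction() const;
      // After, the non-null invariant is encoded in the type:
      const Function &getFunction() const { return F; }
    };

Callers simply drop the dereference, which is the bulk of this diff:

    if (skipFunction(MF.getFunction())) // was: skipFunction(*MF.getFunction())
      return false;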
Diffstat (limited to 'lib/Target/AArch64')
-rw-r--r--  lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp            2
-rw-r--r--  lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp             2
-rw-r--r--  lib/Target/AArch64/AArch64CallLowering.cpp                  4
-rw-r--r--  lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp    2
-rw-r--r--  lib/Target/AArch64/AArch64CollectLOH.cpp                    2
-rw-r--r--  lib/Target/AArch64/AArch64CondBrTuning.cpp                  2
-rw-r--r--  lib/Target/AArch64/AArch64ConditionOptimizer.cpp            2
-rw-r--r--  lib/Target/AArch64/AArch64ConditionalCompares.cpp           4
-rw-r--r--  lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp   2
-rw-r--r--  lib/Target/AArch64/AArch64FalkorHWPFFix.cpp                 2
-rw-r--r--  lib/Target/AArch64/AArch64FrameLowering.cpp                22
-rw-r--r--  lib/Target/AArch64/AArch64ISelDAGToDAG.cpp                  2
-rw-r--r--  lib/Target/AArch64/AArch64ISelLowering.cpp                 28
-rw-r--r--  lib/Target/AArch64/AArch64ISelLowering.h                    6
-rw-r--r--  lib/Target/AArch64/AArch64InstrInfo.cpp                    10
-rw-r--r--  lib/Target/AArch64/AArch64InstrInfo.td                      6
-rw-r--r--  lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp            2
-rw-r--r--  lib/Target/AArch64/AArch64RedundantCopyElimination.cpp      2
-rw-r--r--  lib/Target/AArch64/AArch64RegisterInfo.cpp                 14
-rw-r--r--  lib/Target/AArch64/AArch64SIMDInstrOpt.cpp                  2
-rw-r--r--  lib/Target/AArch64/AArch64StorePairSuppress.cpp             2
21 files changed, 60 insertions(+), 60 deletions(-)
diff --git a/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp b/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
index 1135f0f1262..38a7e331bb9 100644
--- a/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
+++ b/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
@@ -308,7 +308,7 @@ public:
//===----------------------------------------------------------------------===//
bool AArch64A57FPLoadBalancing::runOnMachineFunction(MachineFunction &F) {
- if (skipFunction(*F.getFunction()))
+ if (skipFunction(F.getFunction()))
return false;
if (!F.getSubtarget<AArch64Subtarget>().balanceFPOps())
diff --git a/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp b/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
index 7da56ef030a..338daecb49e 100644
--- a/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
+++ b/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
@@ -393,7 +393,7 @@ bool AArch64AdvSIMDScalar::runOnMachineFunction(MachineFunction &mf) {
bool Changed = false;
DEBUG(dbgs() << "***** AArch64AdvSIMDScalar *****\n");
- if (skipFunction(*mf.getFunction()))
+ if (skipFunction(mf.getFunction()))
return false;
MRI = &mf.getRegInfo();
diff --git a/lib/Target/AArch64/AArch64CallLowering.cpp b/lib/Target/AArch64/AArch64CallLowering.cpp
index 838305858ea..08152c0d83d 100644
--- a/lib/Target/AArch64/AArch64CallLowering.cpp
+++ b/lib/Target/AArch64/AArch64CallLowering.cpp
@@ -220,7 +220,7 @@ void AArch64CallLowering::splitToValueTypes(
bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
const Value *Val, unsigned VReg) const {
MachineFunction &MF = MIRBuilder.getMF();
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR);
assert(((Val && VReg) || (!Val && !VReg)) && "Return value without a vreg");
@@ -322,7 +322,7 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
const ArgInfo &OrigRet,
ArrayRef<ArgInfo> OrigArgs) const {
MachineFunction &MF = MIRBuilder.getMF();
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
MachineRegisterInfo &MRI = MF.getRegInfo();
auto &DL = F.getParent()->getDataLayout();
diff --git a/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp b/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp
index bb750c5093d..b88fba4452a 100644
--- a/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp
+++ b/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp
@@ -42,7 +42,7 @@ struct LDTLSCleanup : public MachineFunctionPass {
}
bool runOnMachineFunction(MachineFunction &MF) override {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
diff --git a/lib/Target/AArch64/AArch64CollectLOH.cpp b/lib/Target/AArch64/AArch64CollectLOH.cpp
index d9d48e9900a..0a9167edcdb 100644
--- a/lib/Target/AArch64/AArch64CollectLOH.cpp
+++ b/lib/Target/AArch64/AArch64CollectLOH.cpp
@@ -482,7 +482,7 @@ static void handleNormalInst(const MachineInstr &MI, LOHInfo *LOHInfos) {
}
bool AArch64CollectLOH::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
DEBUG(dbgs() << "********** AArch64 Collect LOH **********\n"
diff --git a/lib/Target/AArch64/AArch64CondBrTuning.cpp b/lib/Target/AArch64/AArch64CondBrTuning.cpp
index 6fc57623ef4..30cefbad884 100644
--- a/lib/Target/AArch64/AArch64CondBrTuning.cpp
+++ b/lib/Target/AArch64/AArch64CondBrTuning.cpp
@@ -290,7 +290,7 @@ bool AArch64CondBrTuning::tryToTuneBranch(MachineInstr &MI,
}
bool AArch64CondBrTuning::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
DEBUG(dbgs() << "********** AArch64 Conditional Branch Tuning **********\n"
diff --git a/lib/Target/AArch64/AArch64ConditionOptimizer.cpp b/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
index f765825cdee..d14bde33d94 100644
--- a/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
+++ b/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
@@ -327,7 +327,7 @@ bool AArch64ConditionOptimizer::adjustTo(MachineInstr *CmpMI,
bool AArch64ConditionOptimizer::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n"
<< "********** Function: " << MF.getName() << '\n');
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
TII = MF.getSubtarget().getInstrInfo();
diff --git a/lib/Target/AArch64/AArch64ConditionalCompares.cpp b/lib/Target/AArch64/AArch64ConditionalCompares.cpp
index f7c97117ba5..b0bda7c43c1 100644
--- a/lib/Target/AArch64/AArch64ConditionalCompares.cpp
+++ b/lib/Target/AArch64/AArch64ConditionalCompares.cpp
@@ -924,7 +924,7 @@ bool AArch64ConditionalCompares::tryConvert(MachineBasicBlock *MBB) {
bool AArch64ConditionalCompares::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n"
<< "********** Function: " << MF.getName() << '\n');
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
TII = MF.getSubtarget().getInstrInfo();
@@ -936,7 +936,7 @@ bool AArch64ConditionalCompares::runOnMachineFunction(MachineFunction &MF) {
MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
Traces = &getAnalysis<MachineTraceMetrics>();
MinInstr = nullptr;
- MinSize = MF.getFunction()->optForMinSize();
+ MinSize = MF.getFunction().optForMinSize();
bool Changed = false;
CmpConv.runOnMachineFunction(MF, MBPI);
diff --git a/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp b/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
index 0298c76d68e..8e7e740da6f 100644
--- a/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
+++ b/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
@@ -198,7 +198,7 @@ void AArch64DeadRegisterDefinitions::processMachineBasicBlock(
// Scan the function for instructions that have a dead definition of a
// register. Replace that register with the zero register when possible.
bool AArch64DeadRegisterDefinitions::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
TRI = MF.getSubtarget().getRegisterInfo();
diff --git a/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp b/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp
index 7b4ab7cc1a3..d1ddb2e3ef7 100644
--- a/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp
+++ b/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp
@@ -798,7 +798,7 @@ bool FalkorHWPFFix::runOnMachineFunction(MachineFunction &Fn) {
if (ST.getProcFamily() != AArch64Subtarget::Falkor)
return false;
- if (skipFunction(*Fn.getFunction()))
+ if (skipFunction(Fn.getFunction()))
return false;
TII = static_cast<const AArch64InstrInfo *>(ST.getInstrInfo());
diff --git a/lib/Target/AArch64/AArch64FrameLowering.cpp b/lib/Target/AArch64/AArch64FrameLowering.cpp
index 72330d9b7cb..73944359223 100644
--- a/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -174,7 +174,7 @@ bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const {
return false;
// Don't use the red zone if the function explicitly asks us not to.
// This is typically used for kernel code.
- if (MF.getFunction()->hasFnAttribute(Attribute::NoRedZone))
+ if (MF.getFunction().hasFnAttribute(Attribute::NoRedZone))
return false;
const MachineFrameInfo &MFI = MF.getFrameInfo();
@@ -459,13 +459,13 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator MBBI = MBB.begin();
const MachineFrameInfo &MFI = MF.getFrameInfo();
- const Function *Fn = MF.getFunction();
+ const Function &F = MF.getFunction();
const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineModuleInfo &MMI = MF.getMMI();
AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
- bool needsFrameMoves = MMI.hasDebugInfo() || Fn->needsUnwindTableEntry();
+ bool needsFrameMoves = MMI.hasDebugInfo() || F.needsUnwindTableEntry();
bool HasFP = hasFP(MF);
// Debug location must be unknown since the first debug location is used
@@ -474,7 +474,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
// All calls are tail calls in GHC calling conv, and functions have no
// prologue/epilogue.
- if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
+ if (MF.getFunction().getCallingConv() == CallingConv::GHC)
return;
int NumBytes = (int)MFI.getStackSize();
@@ -507,7 +507,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
}
bool IsWin64 =
- Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv());
+ Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0;
auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
@@ -716,7 +716,7 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
// All calls are tail calls in GHC calling conv, and functions have no
// prologue/epilogue.
- if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
+ if (MF.getFunction().getCallingConv() == CallingConv::GHC)
return;
// Initial and residual are named for consistency with the prologue. Note that
@@ -765,7 +765,7 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
// it as the 2nd argument of AArch64ISD::TC_RETURN.
bool IsWin64 =
- Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv());
+ Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0;
auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
@@ -857,7 +857,7 @@ int AArch64FrameLowering::resolveFrameIndexReference(const MachineFunction &MF,
const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
bool IsWin64 =
- Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv());
+ Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0;
int FPOffset = MFI.getObjectOffset(FI) + FixedObject + 16;
int Offset = MFI.getObjectOffset(FI) + MFI.getStackSize();
@@ -928,7 +928,7 @@ static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg) {
static bool produceCompactUnwindFrame(MachineFunction &MF) {
const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
- AttributeList Attrs = MF.getFunction()->getAttributes();
+ AttributeList Attrs = MF.getFunction().getAttributes();
return Subtarget.isTargetMachO() &&
!(Subtarget.getTargetLowering()->supportSwiftError() &&
Attrs.hasAttrSomewhere(Attribute::SwiftError));
@@ -959,7 +959,7 @@ static void computeCalleeSaveRegisterPairs(
AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
MachineFrameInfo &MFI = MF.getFrameInfo();
- CallingConv::ID CC = MF.getFunction()->getCallingConv();
+ CallingConv::ID CC = MF.getFunction().getCallingConv();
unsigned Count = CSI.size();
(void)CC;
// MachO's compact unwind format relies on all registers being stored in
@@ -1154,7 +1154,7 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
RegScavenger *RS) const {
// All calls are tail calls in GHC calling conv, and functions have no
// prologue/epilogue.
- if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
+ if (MF.getFunction().getCallingConv() == CallingConv::GHC)
return;
TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
diff --git a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 06005f6b688..0b10246b0cc 100644
--- a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -53,7 +53,7 @@ public:
}
bool runOnMachineFunction(MachineFunction &MF) override {
- ForCodeSize = MF.getFunction()->optForSize();
+ ForCodeSize = MF.getFunction().optForSize();
Subtarget = &MF.getSubtarget<AArch64Subtarget>();
return SelectionDAGISel::runOnMachineFunction(MF);
}
diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index aaf2811563d..1242cf5be18 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2731,7 +2731,7 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo &MFI = MF.getFrameInfo();
- bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv());
+ bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv());
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
@@ -2745,7 +2745,7 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
// we use a special version of AnalyzeFormalArguments to pass in ValVT and
// LocVT.
unsigned NumArgs = Ins.size();
- Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin();
+ Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin();
unsigned CurArgIdx = 0;
for (unsigned i = 0; i != NumArgs; ++i) {
MVT ValVT = Ins[i].VT;
@@ -2935,7 +2935,7 @@ void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
MachineFrameInfo &MFI = MF.getFrameInfo();
AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
auto PtrVT = getPointerTy(DAG.getDataLayout());
- bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv());
+ bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv());
SmallVector<SDValue, 8> MemOps;
@@ -3087,15 +3087,15 @@ bool AArch64TargetLowering::isEligibleForTailCallOptimization(
return false;
MachineFunction &MF = DAG.getMachineFunction();
- const Function *CallerF = MF.getFunction();
- CallingConv::ID CallerCC = CallerF->getCallingConv();
+ const Function &CallerF = MF.getFunction();
+ CallingConv::ID CallerCC = CallerF.getCallingConv();
bool CCMatch = CallerCC == CalleeCC;
// Byval parameters hand the function a pointer directly into the stack area
// we want to reuse during a tail call. Working around this *is* possible (see
// X86) but less efficient and uglier in LowerCall.
- for (Function::const_arg_iterator i = CallerF->arg_begin(),
- e = CallerF->arg_end();
+ for (Function::const_arg_iterator i = CallerF.arg_begin(),
+ e = CallerF.arg_end();
i != e; ++i)
if (i->hasByValAttr())
return false;
@@ -4185,7 +4185,7 @@ SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
}
SDValue AArch64TargetLowering::LowerCTPOP(SDValue Op, SelectionDAG &DAG) const {
- if (DAG.getMachineFunction().getFunction()->hasFnAttribute(
+ if (DAG.getMachineFunction().getFunction().hasFnAttribute(
Attribute::NoImplicitFloat))
return SDValue();
@@ -4668,7 +4668,7 @@ SDValue AArch64TargetLowering::LowerVASTART(SDValue Op,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
- if (Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv()))
+ if (Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv()))
return LowerWin64_VASTART(Op, DAG);
else if (Subtarget->isTargetDarwin())
return LowerDarwin_VASTART(Op, DAG);
@@ -7909,9 +7909,9 @@ EVT AArch64TargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
// instruction to materialize the v2i64 zero and one store (with restrictive
// addressing mode). Just do two i64 store of zero-registers.
bool Fast;
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
if (Subtarget->hasFPARMv8() && !IsMemset && Size >= 16 &&
- !F->hasFnAttribute(Attribute::NoImplicitFloat) &&
+ !F.hasFnAttribute(Attribute::NoImplicitFloat) &&
(memOpAlign(SrcAlign, DstAlign, 16) ||
(allowsMisalignedMemoryAccesses(MVT::f128, 0, 1, &Fast) && Fast)))
return MVT::f128;
@@ -8156,7 +8156,7 @@ SDValue
AArch64TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
SelectionDAG &DAG,
std::vector<SDNode *> *Created) const {
- AttributeList Attr = DAG.getMachineFunction().getFunction()->getAttributes();
+ AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
if (isIntDivCheap(N->getValueType(0), Attr))
return SDValue(N,0); // Lower SDIV as SDIV
@@ -9577,7 +9577,7 @@ static SDValue splitStores(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
return SDValue();
// Don't split at -Oz.
- if (DAG.getMachineFunction().getFunction()->optForMinSize())
+ if (DAG.getMachineFunction().getFunction().optForMinSize())
return SDValue();
// Don't split v2i64 vectors. Memcpy lowering produces those and splitting
@@ -10939,7 +10939,7 @@ void AArch64TargetLowering::insertCopiesSplitCSR(
// fine for CXX_FAST_TLS since the C++-style TLS access functions should be
// nounwind. If we want to generalize this later, we may need to emit
// CFI pseudo-instructions.
- assert(Entry->getParent()->getFunction()->hasFnAttribute(
+ assert(Entry->getParent()->getFunction().hasFnAttribute(
Attribute::NoUnwind) &&
"Function should be nounwind in insertCopiesSplitCSR!");
Entry->addLiveIn(*I);
diff --git a/lib/Target/AArch64/AArch64ISelLowering.h b/lib/Target/AArch64/AArch64ISelLowering.h
index f88c0ac6653..8d78b5b6b5b 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/lib/Target/AArch64/AArch64ISelLowering.h
@@ -415,7 +415,7 @@ public:
// Do not merge to float value size (128 bytes) if no implicit
// float attribute is set.
- bool NoFloat = DAG.getMachineFunction().getFunction()->hasFnAttribute(
+ bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
Attribute::NoImplicitFloat);
if (NoFloat)
@@ -444,8 +444,8 @@ public:
}
bool supportSplitCSR(MachineFunction *MF) const override {
- return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
- MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
+ return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
+ MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
}
void initializeSplitCSR(MachineBasicBlock *Entry) const override;
void insertCopiesSplitCSR(
diff --git a/lib/Target/AArch64/AArch64InstrInfo.cpp b/lib/Target/AArch64/AArch64InstrInfo.cpp
index 74aee126d45..e26f15bedb7 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -4753,21 +4753,21 @@ AArch64InstrInfo::getOutlininingCandidateInfo(
bool AArch64InstrInfo::isFunctionSafeToOutlineFrom(MachineFunction &MF,
bool OutlineFromLinkOnceODRs) const {
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
// If F uses a redzone, then don't outline from it because it might mess up
// the stack.
- if (!F->hasFnAttribute(Attribute::NoRedZone))
+ if (!F.hasFnAttribute(Attribute::NoRedZone))
return false;
// If anyone is using the address of this function, don't outline from it.
- if (F->hasAddressTaken())
+ if (F.hasAddressTaken())
return false;
// Can F be deduplicated by the linker? If it can, don't outline from it.
- if (!OutlineFromLinkOnceODRs && F->hasLinkOnceODRLinkage())
+ if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
return false;
-
+
return true;
}
diff --git a/lib/Target/AArch64/AArch64InstrInfo.td b/lib/Target/AArch64/AArch64InstrInfo.td
index 841265c3367..79826ca2ed8 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/lib/Target/AArch64/AArch64InstrInfo.td
@@ -328,10 +328,10 @@ def AArch64umaxv : SDNode<"AArch64ISD::UMAXV", SDT_AArch64UnaryVec>;
// the Function object through the <Target>Subtarget and objections were raised
// to that (see post-commit review comments for r301750).
let RecomputePerFunction = 1 in {
- def ForCodeSize : Predicate<"MF->getFunction()->optForSize()">;
- def NotForCodeSize : Predicate<"!MF->getFunction()->optForSize()">;
+ def ForCodeSize : Predicate<"MF->getFunction().optForSize()">;
+ def NotForCodeSize : Predicate<"!MF->getFunction().optForSize()">;
// Avoid generating STRQro if it is slow, unless we're optimizing for code size.
- def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || MF->getFunction()->optForSize()">;
+ def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || MF->getFunction().optForSize()">;
}
include "AArch64InstrFormats.td"
diff --git a/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index c406228b7fe..8a29456430b 100644
--- a/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -1759,7 +1759,7 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
}
bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
- if (skipFunction(*Fn.getFunction()))
+ if (skipFunction(Fn.getFunction()))
return false;
Subtarget = &static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
diff --git a/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp b/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
index 98480835376..e5822b11432 100644
--- a/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
+++ b/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
@@ -485,7 +485,7 @@ bool AArch64RedundantCopyElimination::optimizeBlock(MachineBasicBlock *MBB) {
bool AArch64RedundantCopyElimination::runOnMachineFunction(
MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
TRI = MF.getSubtarget().getRegisterInfo();
MRI = &MF.getRegInfo();
diff --git a/lib/Target/AArch64/AArch64RegisterInfo.cpp b/lib/Target/AArch64/AArch64RegisterInfo.cpp
index 1059bc37c8f..88dd297e007 100644
--- a/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -42,22 +42,22 @@ AArch64RegisterInfo::AArch64RegisterInfo(const Triple &TT)
const MCPhysReg *
AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
assert(MF && "Invalid MachineFunction pointer.");
- if (MF->getFunction()->getCallingConv() == CallingConv::GHC)
+ if (MF->getFunction().getCallingConv() == CallingConv::GHC)
// GHC set of callee saved regs is empty as all those regs are
// used for passing STG regs around
return CSR_AArch64_NoRegs_SaveList;
- if (MF->getFunction()->getCallingConv() == CallingConv::AnyReg)
+ if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
return CSR_AArch64_AllRegs_SaveList;
- if (MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS)
+ if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS)
return MF->getInfo<AArch64FunctionInfo>()->isSplitCSR() ?
CSR_AArch64_CXX_TLS_Darwin_PE_SaveList :
CSR_AArch64_CXX_TLS_Darwin_SaveList;
if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
->supportSwiftError() &&
- MF->getFunction()->getAttributes().hasAttrSomewhere(
+ MF->getFunction().getAttributes().hasAttrSomewhere(
Attribute::SwiftError))
return CSR_AArch64_AAPCS_SwiftError_SaveList;
- if (MF->getFunction()->getCallingConv() == CallingConv::PreserveMost)
+ if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
return CSR_AArch64_RT_MostRegs_SaveList;
else
return CSR_AArch64_AAPCS_SaveList;
@@ -66,7 +66,7 @@ AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
const MCPhysReg *AArch64RegisterInfo::getCalleeSavedRegsViaCopy(
const MachineFunction *MF) const {
assert(MF && "Invalid MachineFunction pointer.");
- if (MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
+ if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
MF->getInfo<AArch64FunctionInfo>()->isSplitCSR())
return CSR_AArch64_CXX_TLS_Darwin_ViaCopy_SaveList;
return nullptr;
@@ -84,7 +84,7 @@ AArch64RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
return CSR_AArch64_CXX_TLS_Darwin_RegMask;
if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
->supportSwiftError() &&
- MF.getFunction()->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
+ MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
return CSR_AArch64_AAPCS_SwiftError_RegMask;
if (CC == CallingConv::PreserveMost)
return CSR_AArch64_RT_MostRegs_RegMask;
diff --git a/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp b/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp
index 7d439058580..e1851875abc 100644
--- a/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp
+++ b/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp
@@ -690,7 +690,7 @@ unsigned AArch64SIMDInstrOpt::determineSrcReg(MachineInstr &MI) const {
}
bool AArch64SIMDInstrOpt::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
TII = MF.getSubtarget().getInstrInfo();
diff --git a/lib/Target/AArch64/AArch64StorePairSuppress.cpp b/lib/Target/AArch64/AArch64StorePairSuppress.cpp
index 78fc322158b..571e61d7083 100644
--- a/lib/Target/AArch64/AArch64StorePairSuppress.cpp
+++ b/lib/Target/AArch64/AArch64StorePairSuppress.cpp
@@ -120,7 +120,7 @@ bool AArch64StorePairSuppress::isNarrowFPStore(const MachineInstr &MI) {
}
bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
const TargetSubtargetInfo &ST = MF.getSubtarget();