author     Simon Dardis <simon.dardis@imgtec.com>    2017-04-07 17:25:05 +0000
committer  Simon Dardis <simon.dardis@imgtec.com>    2017-04-07 17:25:05 +0000
commit     0bea7aaa6fe92be25afe2e722543b472d65fc11f (patch)
tree       e2a67daa76899b522552428399a2250f878e8c6b /lib
parent     abcd91992d98513d674b83282a8289ef21cdc0f5 (diff)
Revert "[SelectionDAG] Enable target specific vector scalarization of calls and returns"
This reverts commit r299766. This change appears to have broken the MIPS
buildbots. Reverting while I investigate.

Revert "[mips] Remove usage of debug only variable (NFC)"

This reverts commit r299769. Follow up commit.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@299788 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib')
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp   232
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h      14
-rw-r--r--  lib/CodeGen/SelectionDAG/StatepointLowering.cpp      2
-rw-r--r--  lib/CodeGen/TargetLoweringBase.cpp                   6
-rw-r--r--  lib/Target/Mips/MipsCCState.cpp                     64
-rw-r--r--  lib/Target/Mips/MipsCCState.h                       34
-rw-r--r--  lib/Target/Mips/MipsCallingConv.td                  10
-rw-r--r--  lib/Target/Mips/MipsISelLowering.cpp                74
-rw-r--r--  lib/Target/Mips/MipsISelLowering.h                  27
-rw-r--r--  lib/Target/Mips/MipsRegisterInfo.cpp                 4
10 files changed, 80 insertions, 387 deletions
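For context before the diff: the reverted change (r299766) let a target describe, via *ForCallingConv hooks on TargetLowering, how values are split into registers at call and return boundaries, separately from ordinary type legalization; the MipsISelLowering hunks below show exactly those overrides being removed. The fragment below is only a sketch of that hook pattern, not code from this commit: the class name MyTargetLowering, the Is64BitGPRs flag, and the header path are illustrative assumptions, and the bodies loosely paraphrase the removed MipsTargetLowering overrides.

    #include "llvm/Target/TargetLowering.h"  // header location assumed for this era of the tree
    #include <algorithm>

    // Sketch only: a hypothetical target that passes vectors as GPR-sized integers.
    class MyTargetLowering : public llvm::TargetLowering {
      bool Is64BitGPRs = false; // stand-in for a real subtarget query (e.g. an ABI check)

    public:
      // Treat vector arguments/returns as a series of GPR-sized integers.
      llvm::MVT getRegisterTypeForCallingConv(llvm::LLVMContext &Context,
                                              llvm::EVT VT) const override {
        if (VT.isVector())
          return Is64BitGPRs ? llvm::MVT::i64 : llvm::MVT::i32;
        return getRegisterType(Context, VT); // fall back to normal legalization
      }

      // One GPR per 32 (or 64) bits of vector, with a minimum of one register.
      unsigned getNumRegistersForCallingConv(llvm::LLVMContext &Context,
                                             llvm::EVT VT) const override {
        if (VT.isVector())
          return std::max(VT.getSizeInBits() / (Is64BitGPRs ? 64u : 32u), 1u);
        return getNumRegisters(Context, VT);
      }
    };

SelectionDAGBuilder then consulted these hooks (and the matching getVectorTypeBreakdownForCallingConv) only for ABI register copies, which is the IsABIRegCopy/IsABIMangled plumbing this revert strips back out.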
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 43887a2e348..315d841cf3c 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -115,8 +115,7 @@ static const unsigned MaxParallelChains = 64;
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
const SDValue *Parts, unsigned NumParts,
- MVT PartVT, EVT ValueVT, const Value *V,
- bool IsABIRegCopy);
+ MVT PartVT, EVT ValueVT, const Value *V);
/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent. If the parts combine to a type
@@ -126,11 +125,10 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
const SDValue *Parts, unsigned NumParts,
MVT PartVT, EVT ValueVT, const Value *V,
- Optional<ISD::NodeType> AssertOp = None,
- bool IsABIRegCopy = false) {
+ Optional<ISD::NodeType> AssertOp = None) {
if (ValueVT.isVector())
return getCopyFromPartsVector(DAG, DL, Parts, NumParts,
- PartVT, ValueVT, V, IsABIRegCopy);
+ PartVT, ValueVT, V);
assert(NumParts > 0 && "No parts to assemble!");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -274,8 +272,7 @@ static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
/// ValueVT (ISD::AssertSext).
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
const SDValue *Parts, unsigned NumParts,
- MVT PartVT, EVT ValueVT, const Value *V,
- bool IsABIRegCopy) {
+ MVT PartVT, EVT ValueVT, const Value *V) {
assert(ValueVT.isVector() && "Not a vector value");
assert(NumParts > 0 && "No parts to assemble!");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -286,18 +283,9 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
EVT IntermediateVT;
MVT RegisterVT;
unsigned NumIntermediates;
- unsigned NumRegs;
-
- if (IsABIRegCopy) {
- NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
- *DAG.getContext(), ValueVT, IntermediateVT, NumIntermediates,
- RegisterVT);
- } else {
- NumRegs =
- TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
- NumIntermediates, RegisterVT);
- }
-
+ unsigned NumRegs =
+ TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
+ NumIntermediates, RegisterVT);
assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
NumParts = NumRegs; // Silence a compiler warning.
assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
@@ -326,14 +314,9 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
// Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
// intermediate operands.
- EVT BuiltVectorTy =
- EVT::getVectorVT(*DAG.getContext(), IntermediateVT.getScalarType(),
- (IntermediateVT.isVector()
- ? IntermediateVT.getVectorNumElements() * NumParts
- : NumIntermediates));
Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
: ISD::BUILD_VECTOR,
- DL, BuiltVectorTy, Ops);
+ DL, ValueVT, Ops);
}
// There is now one part, held in Val. Correct it to match ValueVT.
@@ -372,30 +355,13 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
TLI.isTypeLegal(ValueVT))
return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
+ // Handle cases such as i8 -> <1 x i1>
if (ValueVT.getVectorNumElements() != 1) {
-
- // Certain ABIs require that vectors are passed as integers. For vectors
- // are the same size, this is an obvious bitcast.
- if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
- return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
- } else if (ValueVT.getSizeInBits() < PartEVT.getSizeInBits()) {
- // Bitcast Val back the original type and extract the corresponding
- // vector we want.
- unsigned Elts = PartEVT.getSizeInBits() / ValueVT.getScalarSizeInBits();
- EVT WiderVecType = EVT::getVectorVT(*DAG.getContext(),
- ValueVT.getVectorElementType(), Elts);
- Val = DAG.getBitcast(WiderVecType, Val);
- return DAG.getNode(
- ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
- DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
- }
-
- diagnosePossiblyInvalidConstraint(
- *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
+ diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
+ "non-trivial scalar-to-vector conversion");
return DAG.getUNDEF(ValueVT);
}
- // Handle cases such as i8 -> <1 x i1>
if (ValueVT.getVectorNumElements() == 1 &&
ValueVT.getVectorElementType() != PartEVT)
Val = DAG.getAnyExtOrTrunc(Val, DL, ValueVT.getScalarType());
@@ -405,7 +371,7 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
SDValue Val, SDValue *Parts, unsigned NumParts,
- MVT PartVT, const Value *V, bool IsABIRegCopy);
+ MVT PartVT, const Value *V);
/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts. If the parts contain more bits than Val, then, for
@@ -413,14 +379,12 @@ static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
SDValue *Parts, unsigned NumParts, MVT PartVT,
const Value *V,
- ISD::NodeType ExtendKind = ISD::ANY_EXTEND,
- bool IsABIRegCopy = false) {
+ ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
EVT ValueVT = Val.getValueType();
// Handle the vector case separately.
if (ValueVT.isVector())
- return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
- IsABIRegCopy);
+ return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V);
unsigned PartBits = PartVT.getSizeInBits();
unsigned OrigNumParts = NumParts;
@@ -545,9 +509,7 @@ static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
/// value split into legal parts.
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
SDValue Val, SDValue *Parts, unsigned NumParts,
- MVT PartVT, const Value *V,
- bool IsABIRegCopy) {
-
+ MVT PartVT, const Value *V) {
EVT ValueVT = Val.getValueType();
assert(ValueVT.isVector() && "Not a vector");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -588,22 +550,15 @@ static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
// Promoted vector extract
Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
- } else {
+ } else{
// Vector -> scalar conversion.
- if (ValueVT.getVectorNumElements() == 1) {
- Val = DAG.getNode(
- ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
- DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
+ assert(ValueVT.getVectorNumElements() == 1 &&
+ "Only trivial vector-to-scalar conversions should get here!");
+ Val = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
+ DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
- Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
- } else {
- assert(PartVT.getSizeInBits() > ValueVT.getSizeInBits() &&
- "lossy conversion of vector to scalar type");
- EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(),
- ValueVT.getSizeInBits());
- Val = DAG.getBitcast(IntermediateType, Val);
- Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
- }
+ Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
}
Parts[0] = Val;
@@ -614,31 +569,15 @@ static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
EVT IntermediateVT;
MVT RegisterVT;
unsigned NumIntermediates;
- unsigned NumRegs;
- if (IsABIRegCopy) {
- NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
- *DAG.getContext(), ValueVT, IntermediateVT, NumIntermediates,
- RegisterVT);
- } else {
- NumRegs =
- TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
- NumIntermediates, RegisterVT);
- }
+ unsigned NumRegs = TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT,
+ IntermediateVT,
+ NumIntermediates, RegisterVT);
unsigned NumElements = ValueVT.getVectorNumElements();
assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
NumParts = NumRegs; // Silence a compiler warning.
assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
- // Convert the vector to the appropiate type if necessary.
- unsigned DestVectorNoElts =
- NumIntermediates *
- (IntermediateVT.isVector() ? IntermediateVT.getVectorNumElements() : 1);
- EVT BuiltVectorTy = EVT::getVectorVT(
- *DAG.getContext(), IntermediateVT.getScalarType(), DestVectorNoElts);
- if (Val.getValueType() != BuiltVectorTy)
- Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
-
// Split the vector into intermediate operands.
SmallVector<SDValue, 8> Ops(NumIntermediates);
for (unsigned i = 0; i != NumIntermediates; ++i) {
@@ -671,31 +610,22 @@ static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
}
}
-RegsForValue::RegsForValue() { IsABIMangled = false; }
+RegsForValue::RegsForValue() {}
RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
- EVT valuevt, bool IsABIMangledValue)
- : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
- RegCount(1, regs.size()), IsABIMangled(IsABIMangledValue) {}
+ EVT valuevt)
+ : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
- const DataLayout &DL, unsigned Reg, Type *Ty,
- bool IsABIMangledValue) {
+ const DataLayout &DL, unsigned Reg, Type *Ty) {
ComputeValueVTs(TLI, DL, Ty, ValueVTs);
- IsABIMangled = IsABIMangledValue;
-
for (EVT ValueVT : ValueVTs) {
- unsigned NumRegs = IsABIMangledValue
- ? TLI.getNumRegistersForCallingConv(Context, ValueVT)
- : TLI.getNumRegisters(Context, ValueVT);
- MVT RegisterVT = IsABIMangledValue
- ? TLI.getRegisterTypeForCallingConv(Context, ValueVT)
- : TLI.getRegisterType(Context, ValueVT);
+ unsigned NumRegs = TLI.getNumRegisters(Context, ValueVT);
+ MVT RegisterVT = TLI.getRegisterType(Context, ValueVT);
for (unsigned i = 0; i != NumRegs; ++i)
Regs.push_back(Reg + i);
RegVTs.push_back(RegisterVT);
- RegCount.push_back(NumRegs);
Reg += NumRegs;
}
}
@@ -716,10 +646,8 @@ SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
// Copy the legal parts from the registers.
EVT ValueVT = ValueVTs[Value];
- unsigned NumRegs = RegCount[Value];
- MVT RegisterVT = IsABIMangled
- ? TLI.getRegisterTypeForCallingConv(RegVTs[Value])
- : RegVTs[Value];
+ unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
+ MVT RegisterVT = RegVTs[Value];
Parts.resize(NumRegs);
for (unsigned i = 0; i != NumRegs; ++i) {
@@ -814,11 +742,9 @@ void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
unsigned NumRegs = Regs.size();
SmallVector<SDValue, 8> Parts(NumRegs);
for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
- unsigned NumParts = RegCount[Value];
-
- MVT RegisterVT = IsABIMangled
- ? TLI.getRegisterTypeForCallingConv(RegVTs[Value])
- : RegVTs[Value];
+ EVT ValueVT = ValueVTs[Value];
+ unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
+ MVT RegisterVT = RegVTs[Value];
if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
ExtendKind = ISD::ZERO_EXTEND;
@@ -1041,16 +967,10 @@ SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
if (It != FuncInfo.ValueMap.end()) {
unsigned InReg = It->second;
- bool IsABIRegCopy =
- V && ((isa<CallInst>(V) &&
- !(static_cast<const CallInst *>(V))->isInlineAsm()) ||
- isa<ReturnInst>(V));
-
RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
- DAG.getDataLayout(), InReg, Ty, IsABIRegCopy);
+ DAG.getDataLayout(), InReg, Ty);
SDValue Chain = DAG.getEntryNode();
- Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
- V);
+ Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
resolveDanglingDebugInfo(V, Result);
}
@@ -1237,13 +1157,8 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
// If this is an instruction which fast-isel has deferred, select it now.
if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
- bool IsABIRegCopy =
- V && ((isa<CallInst>(V) &&
- !(static_cast<const CallInst *>(V))->isInlineAsm()) ||
- isa<ReturnInst>(V));
-
RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
- Inst->getType(), IsABIRegCopy);
+ Inst->getType());
SDValue Chain = DAG.getEntryNode();
return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
}
@@ -1471,12 +1386,12 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
- unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, VT);
- MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, VT);
+ unsigned NumParts = TLI.getNumRegisters(Context, VT);
+ MVT PartVT = TLI.getRegisterType(Context, VT);
SmallVector<SDValue, 4> Parts(NumParts);
getCopyToParts(DAG, getCurSDLoc(),
SDValue(RetOp.getNode(), RetOp.getResNo() + j),
- &Parts[0], NumParts, PartVT, &I, ExtendKind, true);
+ &Parts[0], NumParts, PartVT, &I, ExtendKind);
// 'inreg' on function refers to return value
ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
@@ -7149,8 +7064,8 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
SDLoc dl = getCurSDLoc();
// Use the produced MatchedRegs object to
- MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Flag,
- CS.getInstruction());
+ MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl,
+ Chain, &Flag, CS.getInstruction());
MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
true, OpInfo.getMatchedOperand(), dl,
DAG, AsmNodeOperands);
@@ -7766,10 +7681,8 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
} else {
for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
EVT VT = RetTys[I];
- MVT RegisterVT =
- getRegisterTypeForCallingConv(CLI.RetTy->getContext(), VT);
- unsigned NumRegs =
- getNumRegistersForCallingConv(CLI.RetTy->getContext(), VT);
+ MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
+ unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
for (unsigned i = 0; i != NumRegs; ++i) {
ISD::InputArg MyFlags;
MyFlags.VT = RegisterVT;
@@ -7818,11 +7731,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
SDValue Op = SDValue(Args[i].Node.getNode(),
Args[i].Node.getResNo() + Value);
ISD::ArgFlagsTy Flags;
-
- // Certain targets (such as MIPS), may have a different ABI alignment
- // for a type depending on the context. Give the target a chance to
- // specify the alignment it wants.
- unsigned OriginalAlignment = getABIAlignmentForCallingConv(ArgTy, DL);
+ unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
if (Args[i].IsZExt)
Flags.setZExt();
@@ -7877,9 +7786,8 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
Flags.setInConsecutiveRegs();
Flags.setOrigAlign(OriginalAlignment);
- MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(), VT);
- unsigned NumParts =
- getNumRegistersForCallingConv(CLI.RetTy->getContext(), VT);
+ MVT PartVT = getRegisterType(CLI.RetTy->getContext(), VT);
+ unsigned NumParts = getNumRegisters(CLI.RetTy->getContext(), VT);
SmallVector<SDValue, 4> Parts(NumParts);
ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
@@ -7909,8 +7817,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
}
getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT,
- CLI.CS ? CLI.CS->getInstruction() : nullptr, ExtendKind,
- true);
+ CLI.CS ? CLI.CS->getInstruction() : nullptr, ExtendKind);
for (unsigned j = 0; j != NumParts; ++j) {
// if it isn't first piece, alignment must be 1
@@ -8010,14 +7917,12 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
unsigned CurReg = 0;
for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
EVT VT = RetTys[I];
- MVT RegisterVT =
- getRegisterTypeForCallingConv(CLI.RetTy->getContext(), VT);
- unsigned NumRegs =
- getNumRegistersForCallingConv(CLI.RetTy->getContext(), VT);
+ MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
+ unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
NumRegs, RegisterVT, VT, nullptr,
- AssertOp, true));
+ AssertOp));
CurReg += NumRegs;
}
@@ -8053,15 +7958,8 @@ SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- // If this is an InlineAsm we have to match the registers required, not the
- // notional registers required by the type.
- bool IsABIRegCopy =
- V && ((isa<CallInst>(V) &&
- !(static_cast<const CallInst *>(V))->isInlineAsm()) ||
- isa<ReturnInst>(V));
-
RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
- V->getType(), IsABIRegCopy);
+ V->getType());
SDValue Chain = DAG.getEntryNode();
ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) ==
@@ -8304,12 +8202,7 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
EVT VT = ValueVTs[Value];
Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
ISD::ArgFlagsTy Flags;
-
- // Certain targets (such as MIPS), may have a different ABI alignment
- // for a type depending on the context. Give the target a chance to
- // specify the alignment it wants.
- unsigned OriginalAlignment =
- TLI->getABIAlignmentForCallingConv(ArgTy, DL);
+ unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt))
Flags.setZExt();
@@ -8371,10 +8264,8 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
if (ArgCopyElisionCandidates.count(&Arg))
Flags.setCopyElisionCandidate();
- MVT RegisterVT =
- TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(), VT);
- unsigned NumRegs =
- TLI->getNumRegistersForCallingConv(*CurDAG->getContext(), VT);
+ MVT RegisterVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
+ unsigned NumRegs = TLI->getNumRegisters(*CurDAG->getContext(), VT);
for (unsigned i = 0; i != NumRegs; ++i) {
ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed,
Idx-1, PartBase+i*RegisterVT.getStoreSize());
@@ -8481,10 +8372,8 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
for (unsigned Val = 0; Val != NumValues; ++Val) {
EVT VT = ValueVTs[Val];
- MVT PartVT =
- TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(), VT);
- unsigned NumParts =
- TLI->getNumRegistersForCallingConv(*CurDAG->getContext(), VT);
+ MVT PartVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
+ unsigned NumParts = TLI->getNumRegisters(*CurDAG->getContext(), VT);
// Even an apparant 'unused' swifterror argument needs to be returned. So
// we do generate a copy for it that can be used on return from the
@@ -8497,8 +8386,7 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
AssertOp = ISD::AssertZext;
ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
- PartVT, VT, nullptr, AssertOp,
- true));
+ PartVT, VT, nullptr, AssertOp));
}
i += NumParts;
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index b24a513f3c0..c6acc09b660 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -973,28 +973,18 @@ struct RegsForValue {
/// expanded value requires multiple registers.
SmallVector<unsigned, 4> Regs;
- /// This list holds the number of registers for each value.
- SmallVector<unsigned, 4> RegCount;
-
- /// Records if this value needs to be treated in an ABI dependant manner,
- /// different to normal type legalization.
- bool IsABIMangled;
-
RegsForValue();
- RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt, EVT valuevt,
- bool IsABIMangledValue = false);
+ RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt, EVT valuevt);
RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
- const DataLayout &DL, unsigned Reg, Type *Ty,
- bool IsABIMangledValue = false);
+ const DataLayout &DL, unsigned Reg, Type *Ty);
/// Add the specified values to this one.
void append(const RegsForValue &RHS) {
ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
Regs.append(RHS.Regs.begin(), RHS.Regs.end());
- RegCount.push_back(RHS.Regs.size());
}
/// Emit a series of CopyFromReg nodes that copies from this value and returns
diff --git a/lib/CodeGen/SelectionDAG/StatepointLowering.cpp b/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
index 1e2dc3a9f23..d27e2455978 100644
--- a/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
@@ -835,7 +835,7 @@ SelectionDAGBuilder::LowerStatepoint(ImmutableStatepoint ISP,
// completely and make statepoint call to return a tuple.
unsigned Reg = FuncInfo.CreateRegs(RetTy);
RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
- DAG.getDataLayout(), Reg, RetTy, true);
+ DAG.getDataLayout(), Reg, RetTy);
SDValue Chain = DAG.getEntryNode();
RFV.getCopyToRegs(ReturnValue, DAG, getCurSDLoc(), Chain, nullptr);
diff --git a/lib/CodeGen/TargetLoweringBase.cpp b/lib/CodeGen/TargetLoweringBase.cpp
index c9ecd8ae0f9..fc147633966 100644
--- a/lib/CodeGen/TargetLoweringBase.cpp
+++ b/lib/CodeGen/TargetLoweringBase.cpp
@@ -1616,10 +1616,8 @@ void llvm::GetReturnInfo(Type *ReturnType, AttributeList attr,
VT = MinVT;
}
- unsigned NumParts =
- TLI.getNumRegistersForCallingConv(ReturnType->getContext(), VT);
- MVT PartVT =
- TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), VT);
+ unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
+ MVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);
// 'inreg' on function refers to return value
ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
diff --git a/lib/Target/Mips/MipsCCState.cpp b/lib/Target/Mips/MipsCCState.cpp
index 62ff99c7816..7af988c1f64 100644
--- a/lib/Target/Mips/MipsCCState.cpp
+++ b/lib/Target/Mips/MipsCCState.cpp
@@ -54,22 +54,6 @@ static bool originalTypeIsF128(Type *Ty, const SDNode *CallNode) {
return (ES && Ty->isIntegerTy(128) && isF128SoftLibCall(ES->getSymbol()));
}
-/// Return true if the original type was vXfXX.
-static bool originalEVTTypeIsVectorFloat(EVT Ty) {
- if (Ty.isVector() && Ty.getVectorElementType().isFloatingPoint())
- return true;
-
- return false;
-}
-
-/// Return true if the original type was vXfXX / vXfXX.
-static bool originalTypeIsVectorFloat(Type * Ty) {
- if (Ty->isVectorTy() && Ty->isFPOrFPVectorTy())
- return true;
-
- return false;
-}
-
MipsCCState::SpecialCallingConvType
MipsCCState::getSpecialCallingConvForCallee(const SDNode *Callee,
const MipsSubtarget &Subtarget) {
@@ -97,8 +81,8 @@ void MipsCCState::PreAnalyzeCallResultForF128(
}
}
-/// Identify lowered values that originated from f128 or float arguments and
-/// record this for use by RetCC_MipsN.
+/// Identify lowered values that originated from f128 arguments and record
+/// this for use by RetCC_MipsN.
void MipsCCState::PreAnalyzeReturnForF128(
const SmallVectorImpl<ISD::OutputArg> &Outs) {
const MachineFunction &MF = getMachineFunction();
@@ -110,50 +94,26 @@ void MipsCCState::PreAnalyzeReturnForF128(
}
}
-/// Identify lower values that originated from vXfXX and record
-/// this.
-void MipsCCState::PreAnalyzeCallResultForVectorFloat(
- const SmallVectorImpl<ISD::InputArg> &Ins,
- const TargetLowering::CallLoweringInfo &CLI) {
- for (unsigned i = 0; i < Ins.size(); ++i) {
- OriginalRetWasFloatVector.push_back(
- originalTypeIsVectorFloat(CLI.RetTy));
- }
-}
-
-/// Identify lowered values that originated from vXfXX arguments and record
+/// Identify lowered values that originated from f128 arguments and record
/// this.
-void MipsCCState::PreAnalyzeReturnForVectorFloat(
- const SmallVectorImpl<ISD::OutputArg> &Outs) {
- for (unsigned i = 0; i < Outs.size(); ++i) {
- ISD::OutputArg Out = Outs[i];
- OriginalRetWasFloatVector.push_back(
- originalEVTTypeIsVectorFloat(Out.ArgVT));
- }
-}
-/// Identify lowered values that originated from f128, float and sret to vXfXX
-/// arguments and record this.
void MipsCCState::PreAnalyzeCallOperands(
const SmallVectorImpl<ISD::OutputArg> &Outs,
std::vector<TargetLowering::ArgListEntry> &FuncArgs,
const SDNode *CallNode) {
for (unsigned i = 0; i < Outs.size(); ++i) {
- TargetLowering::ArgListEntry FuncArg = FuncArgs[Outs[i].OrigArgIndex];
-
- OriginalArgWasF128.push_back(originalTypeIsF128(FuncArg.Ty, CallNode));
- OriginalArgWasFloat.push_back(FuncArg.Ty->isFloatingPointTy());
-
- OriginalArgWasFloatVector.push_back(FuncArg.Ty->isVectorTy());
+ OriginalArgWasF128.push_back(
+ originalTypeIsF128(FuncArgs[Outs[i].OrigArgIndex].Ty, CallNode));
+ OriginalArgWasFloat.push_back(
+ FuncArgs[Outs[i].OrigArgIndex].Ty->isFloatingPointTy());
CallOperandIsFixed.push_back(Outs[i].IsFixed);
}
}
-/// Identify lowered values that originated from f128, float and vXfXX arguments
-/// and record this.
+/// Identify lowered values that originated from f128 arguments and record
+/// this.
void MipsCCState::PreAnalyzeFormalArgumentsForF128(
const SmallVectorImpl<ISD::InputArg> &Ins) {
const MachineFunction &MF = getMachineFunction();
-
for (unsigned i = 0; i < Ins.size(); ++i) {
Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
@@ -163,7 +123,6 @@ void MipsCCState::PreAnalyzeFormalArgumentsForF128(
if (Ins[i].Flags.isSRet()) {
OriginalArgWasF128.push_back(false);
OriginalArgWasFloat.push_back(false);
- OriginalArgWasFloatVector.push_back(false);
continue;
}
@@ -173,10 +132,5 @@ void MipsCCState::PreAnalyzeFormalArgumentsForF128(
OriginalArgWasF128.push_back(
originalTypeIsF128(FuncArg->getType(), nullptr));
OriginalArgWasFloat.push_back(FuncArg->getType()->isFloatingPointTy());
-
- // The MIPS vector ABI exhibits a corner case of sorts or quirk; if the
- // first argument is actually an SRet pointer to a vector, then the next
- // argument slot is $a2.
- OriginalArgWasFloatVector.push_back(FuncArg->getType()->isVectorTy());
}
}
diff --git a/lib/Target/Mips/MipsCCState.h b/lib/Target/Mips/MipsCCState.h
index d86bb85126b..081c393a09b 100644
--- a/lib/Target/Mips/MipsCCState.h
+++ b/lib/Target/Mips/MipsCCState.h
@@ -45,33 +45,16 @@ private:
const SDNode *CallNode);
/// Identify lowered values that originated from f128 arguments and record
- /// this for use by RetCC_MipsN.
+ /// this.
void
PreAnalyzeFormalArgumentsForF128(const SmallVectorImpl<ISD::InputArg> &Ins);
- void PreAnalyzeCallResultForVectorFloat(
- const SmallVectorImpl<ISD::InputArg> &Ins,
- const TargetLowering::CallLoweringInfo &CLI);
-
- void PreAnalyzeFormalArgumentsForVectorFloat(
- const SmallVectorImpl<ISD::InputArg> &Ins);
-
- void
- PreAnalyzeReturnForVectorFloat(const SmallVectorImpl<ISD::OutputArg> &Outs);
-
/// Records whether the value has been lowered from an f128.
SmallVector<bool, 4> OriginalArgWasF128;
/// Records whether the value has been lowered from float.
SmallVector<bool, 4> OriginalArgWasFloat;
- /// Records whether the value has been lowered from a floating point vector.
- SmallVector<bool, 4> OriginalArgWasFloatVector;
-
- /// Records whether the return value has been lowered from a floating point
- /// vector.
- SmallVector<bool, 4> OriginalRetWasFloatVector;
-
/// Records whether the value was a fixed argument.
/// See ISD::OutputArg::IsFixed,
SmallVector<bool, 4> CallOperandIsFixed;
@@ -95,7 +78,6 @@ public:
CCState::AnalyzeCallOperands(Outs, Fn);
OriginalArgWasF128.clear();
OriginalArgWasFloat.clear();
- OriginalArgWasFloatVector.clear();
CallOperandIsFixed.clear();
}
@@ -114,38 +96,31 @@ public:
CCState::AnalyzeFormalArguments(Ins, Fn);
OriginalArgWasFloat.clear();
OriginalArgWasF128.clear();
- OriginalArgWasFloatVector.clear();
}
void AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
CCAssignFn Fn,
const TargetLowering::CallLoweringInfo &CLI) {
PreAnalyzeCallResultForF128(Ins, CLI);
- PreAnalyzeCallResultForVectorFloat(Ins, CLI);
CCState::AnalyzeCallResult(Ins, Fn);
OriginalArgWasFloat.clear();
OriginalArgWasF128.clear();
- OriginalArgWasFloatVector.clear();
}
void AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
CCAssignFn Fn) {
PreAnalyzeReturnForF128(Outs);
- PreAnalyzeReturnForVectorFloat(Outs);
CCState::AnalyzeReturn(Outs, Fn);
OriginalArgWasFloat.clear();
OriginalArgWasF128.clear();
- OriginalArgWasFloatVector.clear();
}
bool CheckReturn(const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
CCAssignFn Fn) {
PreAnalyzeReturnForF128(ArgsFlags);
- PreAnalyzeReturnForVectorFloat(ArgsFlags);
bool Return = CCState::CheckReturn(ArgsFlags, Fn);
OriginalArgWasFloat.clear();
OriginalArgWasF128.clear();
- OriginalArgWasFloatVector.clear();
return Return;
}
@@ -153,13 +128,6 @@ public:
bool WasOriginalArgFloat(unsigned ValNo) {
return OriginalArgWasFloat[ValNo];
}
- bool WasOriginalArgVectorFloat(unsigned ValNo) const {
- return OriginalArgWasFloatVector[ValNo];
- }
- bool WasOriginalRetVectorFloat(unsigned ValNo) const {
- return OriginalRetWasFloatVector[ValNo];
- }
-
bool IsCallOperandFixed(unsigned ValNo) { return CallOperandIsFixed[ValNo]; }
SpecialCallingConvType getSpecialCallingConv() { return SpecialCallingConv; }
};
diff --git a/lib/Target/Mips/MipsCallingConv.td b/lib/Target/Mips/MipsCallingConv.td
index b5df78f89a6..a57cb7badc1 100644
--- a/lib/Target/Mips/MipsCallingConv.td
+++ b/lib/Target/Mips/MipsCallingConv.td
@@ -37,10 +37,6 @@ class CCIfOrigArgWasF128<CCAction A>
class CCIfArgIsVarArg<CCAction A>
: CCIf<"!static_cast<MipsCCState *>(&State)->IsCallOperandFixed(ValNo)", A>;
-/// Match if the return was a floating point vector.
-class CCIfOrigArgWasNotVectorFloat<CCAction A>
- : CCIf<"!static_cast<MipsCCState *>(&State)"
- "->WasOriginalRetVectorFloat(ValNo)", A>;
/// Match if the special calling conv is the specified value.
class CCIfSpecialCallingConv<string CC, CCAction A>
@@ -97,10 +93,8 @@ def RetCC_MipsO32 : CallingConv<[
// Promote i1/i8/i16 return values to i32.
CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
- // i32 are returned in registers V0, V1, A0, A1, unless the original return
- // type was a vector of floats.
- CCIfOrigArgWasNotVectorFloat<CCIfType<[i32],
- CCAssignToReg<[V0, V1, A0, A1]>>>,
+ // i32 are returned in registers V0, V1, A0, A1
+ CCIfType<[i32], CCAssignToReg<[V0, V1, A0, A1]>>,
// f32 are returned in registers F0, F2
CCIfType<[f32], CCAssignToReg<[F0, F2]>>,
diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp
index a726e25f0b0..93c5f496ce9 100644
--- a/lib/Target/Mips/MipsISelLowering.cpp
+++ b/lib/Target/Mips/MipsISelLowering.cpp
@@ -71,48 +71,6 @@ static bool isShiftedMask(uint64_t I, uint64_t &Pos, uint64_t &Size) {
return true;
}
-// The MIPS MSA ABI passes vector arguments in the integer register set.
-// The number of integer registers used is dependant on the ABI used.
-MVT MipsTargetLowering::getRegisterTypeForCallingConv(MVT VT) const {
- if (VT.isVector() && Subtarget.hasMSA())
- return Subtarget.isABI_O32() ? MVT::i32 : MVT::i64;
- return MipsTargetLowering::getRegisterType(VT);
-}
-
-MVT MipsTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
- EVT VT) const {
- if (VT.isVector()) {
- if (Subtarget.isABI_O32()) {
- return MVT::i32;
- } else {
- return (VT.getSizeInBits() == 32) ? MVT::i32 : MVT::i64;
- }
- }
- return MipsTargetLowering::getRegisterType(Context, VT);
-}
-
-unsigned MipsTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
- EVT VT) const {
- if (VT.isVector())
- return std::max((VT.getSizeInBits() / (Subtarget.isABI_O32() ? 32 : 64)),
- 1U);
- return MipsTargetLowering::getNumRegisters(Context, VT);
-}
-
-unsigned MipsTargetLowering::getVectorTypeBreakdownForCallingConv(
- LLVMContext &Context, EVT VT, EVT &IntermediateVT,
- unsigned &NumIntermediates, MVT &RegisterVT) const {
-
- // Break down vector types to either 2 i64s or 4 i32s.
- RegisterVT = getRegisterTypeForCallingConv(Context, VT) ;
- IntermediateVT = RegisterVT;
- NumIntermediates = VT.getSizeInBits() < RegisterVT.getSizeInBits()
- ? VT.getVectorNumElements()
- : VT.getSizeInBits() / RegisterVT.getSizeInBits();
-
- return NumIntermediates;
-}
-
SDValue MipsTargetLowering::getGlobalReg(SelectionDAG &DAG, EVT Ty) const {
MipsFunctionInfo *FI = DAG.getMachineFunction().getInfo<MipsFunctionInfo>();
return DAG.getRegister(FI->getGlobalBaseReg(), Ty);
@@ -2557,11 +2515,6 @@ SDValue MipsTargetLowering::lowerFP_TO_SINT(SDValue Op,
// yet to hold an argument. Otherwise, use A2, A3 and stack. If A1 is
// not used, it must be shadowed. If only A3 is available, shadow it and
// go to stack.
-// vXiX - Received as scalarized i32s, passed in A0 - A3 and the stack.
-// vXf32 - Passed in either a pair of registers {A0, A1}, {A2, A3} or {A0 - A3}
-// with the remainder spilled to the stack.
-// vXf64 - Passed in either {A0, A1, A2, A3} or {A2, A3} and in both cases
-// spilling the remainder to the stack.
//
// For vararg functions, all arguments are passed in A0, A1, A2, A3 and stack.
//===----------------------------------------------------------------------===//
@@ -2573,13 +2526,8 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
State.getMachineFunction().getSubtarget());
static const MCPhysReg IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };
-
- const MipsCCState * MipsState = static_cast<MipsCCState *>(&State);
-
static const MCPhysReg F32Regs[] = { Mips::F12, Mips::F14 };
- static const MCPhysReg FloatVectorIntRegs[] = { Mips::A0, Mips::A2 };
-
// Do not process byval args here.
if (ArgFlags.isByVal())
return true;
@@ -2617,26 +2565,8 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
State.getFirstUnallocated(F32Regs) != ValNo;
unsigned OrigAlign = ArgFlags.getOrigAlign();
bool isI64 = (ValVT == MVT::i32 && OrigAlign == 8);
- bool isVectorFloat = MipsState->WasOriginalArgVectorFloat(ValNo);
-
- // The MIPS vector ABI for floats passes them in a pair of registers
- if (ValVT == MVT::i32 && isVectorFloat) {
- // This is the start of an vector that was scalarized into an unknown number
- // of components. It doesn't matter how many there are. Allocate one of the
- // notional 8 byte aligned registers which map onto the argument stack, and
- // shadow the register lost to alignment requirements.
- if (ArgFlags.isSplit()) {
- Reg = State.AllocateReg(FloatVectorIntRegs);
- if (Reg == Mips::A2)
- State.AllocateReg(Mips::A1);
- else if (Reg == 0)
- State.AllocateReg(Mips::A3);
- } else {
- // If we're an intermediate component of the split, we can just attempt to
- // allocate a register directly.
- Reg = State.AllocateReg(IntRegs);
- }
- } else if (ValVT == MVT::i32 || (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
+
+ if (ValVT == MVT::i32 || (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
Reg = State.AllocateReg(IntRegs);
// If this is the first part of an i64 arg,
// the allocated register must be either A0 or A2.
diff --git a/lib/Target/Mips/MipsISelLowering.h b/lib/Target/Mips/MipsISelLowering.h
index 0e47ed38f42..2dcafd51061 100644
--- a/lib/Target/Mips/MipsISelLowering.h
+++ b/lib/Target/Mips/MipsISelLowering.h
@@ -248,33 +248,6 @@ namespace llvm {
bool isCheapToSpeculateCttz() const override;
bool isCheapToSpeculateCtlz() const override;
- /// Return the register type for a given MVT, ensuring vectors are treated
- /// as a series of gpr sized integers.
- virtual MVT getRegisterTypeForCallingConv(MVT VT) const override;
-
- /// Return the register type for a given MVT, ensuring vectors are treated
- /// as a series of gpr sized integers.
- virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
- EVT VT) const override;
-
- /// Return the number of registers for a given MVT, ensuring vectors are
- /// treated as a series of gpr sized integers.
- virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
- EVT VT) const override;
-
- /// Break down vectors to the correct number of gpr sized integers.
- virtual unsigned getVectorTypeBreakdownForCallingConv(
- LLVMContext &Context, EVT VT, EVT &IntermediateVT,
- unsigned &NumIntermediates, MVT &RegisterVT) const override;
-
- /// Return the correct alignment for the current calling convention.
- virtual unsigned
- getABIAlignmentForCallingConv(Type *ArgTy, DataLayout DL) const override {
- if (ArgTy->isVectorTy())
- return std::min(DL.getABITypeAlignment(ArgTy), 8U);
- return DL.getABITypeAlignment(ArgTy);
- }
-
ISD::NodeType getExtendForAtomicOps() const override {
return ISD::SIGN_EXTEND;
}
diff --git a/lib/Target/Mips/MipsRegisterInfo.cpp b/lib/Target/Mips/MipsRegisterInfo.cpp
index de3389b5a6b..65be350f259 100644
--- a/lib/Target/Mips/MipsRegisterInfo.cpp
+++ b/lib/Target/Mips/MipsRegisterInfo.cpp
@@ -286,9 +286,7 @@ eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
DEBUG(errs() << "FrameIndex : " << FrameIndex << "\n"
<< "spOffset : " << spOffset << "\n"
- << "stackSize : " << stackSize << "\n"
- << "alignment : "
- << MF.getFrameInfo().getObjectAlignment(FrameIndex) << "\n");
+ << "stackSize : " << stackSize << "\n");
eliminateFI(MI, FIOperandNum, FrameIndex, stackSize, spOffset);
}