author    David Majnemer <david.majnemer@gmail.com>    2016-07-13 04:22:12 +0000
committer David Majnemer <david.majnemer@gmail.com>    2016-07-13 04:22:12 +0000
commit    0c6dbcca55584136c467728ff3e965b9af9a35ce (patch)
tree      52603235f2591c3cf45c504c05c26d8c9b160d37 /lib/Analysis/ConstantFolding.cpp
parent    75f3342b8f202ae689eb78107ddb899599ea31b6 (diff)
[ConstantFolding] Cleanups
No functional change is intended, just a minor cleanup.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@275249 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Analysis/ConstantFolding.cpp')
-rw-r--r--  lib/Analysis/ConstantFolding.cpp | 133
1 file changed, 66 insertions(+), 67 deletions(-)
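For readers skimming the diff, the cleanup applies two idioms throughout the file: spelling dyn_cast/cast results as auto * where the cast already names the type, and iterating operands with range-based for instead of explicit op_iterator loops. The standalone sketch below is not part of the commit; the helper name hasOnlyConstantIntOperands is hypothetical, but it uses the same LLVM casting and operand APIs the file relies on.

// Sketch only -- illustrates the idioms the diff below switches to.
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

static bool hasOnlyConstantIntOperands(const Instruction *I) {
  // Range-based iteration over the operands rather than an explicit
  // op_begin()/op_end() iterator loop.
  for (const Value *Op : I->operands()) {
    // auto * avoids restating the type the dyn_cast already names;
    // dyn_cast returns null when Op is not a ConstantInt.
    const auto *CI = dyn_cast<ConstantInt>(Op);
    if (!CI)
      return false;
  }
  return true;
}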
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index c6dae24a63b..bf0cb3e1a18 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -61,8 +61,8 @@ Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
return Constant::getAllOnesValue(DestTy);
// Handle a vector->integer cast.
- if (IntegerType *IT = dyn_cast<IntegerType>(DestTy)) {
- VectorType *VTy = dyn_cast<VectorType>(C->getType());
+ if (auto *IT = dyn_cast<IntegerType>(DestTy)) {
+ auto *VTy = dyn_cast<VectorType>(C->getType());
if (!VTy)
return ConstantExpr::getBitCast(C, DestTy);
@@ -102,7 +102,7 @@ Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
}
// The code below only handles casts to vectors currently.
- VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
+ auto *DestVTy = dyn_cast<VectorType>(DestTy);
if (!DestVTy)
return ConstantExpr::getBitCast(C, DestTy);
@@ -180,7 +180,7 @@ Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
Constant *Elt = Zero;
unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
for (unsigned j = 0; j != Ratio; ++j) {
- Constant *Src =dyn_cast<ConstantInt>(C->getAggregateElement(SrcElt++));
+ Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(SrcElt++));
if (!Src) // Reject constantexpr elements.
return ConstantExpr::getBitCast(C, DestTy);
@@ -206,7 +206,7 @@ Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
// Loop over each source value, expanding into multiple results.
for (unsigned i = 0; i != NumSrcElt; ++i) {
- Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(i));
+ auto *Src = dyn_cast<ConstantInt>(C->getAggregateElement(i));
if (!Src) // Reject constantexpr elements.
return ConstantExpr::getBitCast(C, DestTy);
@@ -249,7 +249,7 @@ bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
}
// Otherwise, if this isn't a constant expr, bail out.
- ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
+ auto *CE = dyn_cast<ConstantExpr>(C);
if (!CE) return false;
// Look through ptr->int and ptr->ptr casts.
@@ -258,7 +258,7 @@ bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL);
// i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
- GEPOperator *GEP = dyn_cast<GEPOperator>(CE);
+ auto *GEP = dyn_cast<GEPOperator>(CE);
if (!GEP)
return false;
@@ -293,7 +293,7 @@ bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
return true;
- if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
+ if (auto *CI = dyn_cast<ConstantInt>(C)) {
if (CI->getBitWidth() > 64 ||
(CI->getBitWidth() & 7) != 0)
return false;
@@ -311,7 +311,7 @@ bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
return true;
}
- if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
+ if (auto *CFP = dyn_cast<ConstantFP>(C)) {
if (CFP->getType()->isDoubleTy()) {
C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
@@ -327,7 +327,7 @@ bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
return false;
}
- if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
+ if (auto *CS = dyn_cast<ConstantStruct>(C)) {
const StructLayout *SL = DL.getStructLayout(CS->getType());
unsigned Index = SL->getElementContainingOffset(ByteOffset);
uint64_t CurEltOffset = SL->getElementOffset(Index);
@@ -371,7 +371,7 @@ bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
uint64_t Index = ByteOffset / EltSize;
uint64_t Offset = ByteOffset - Index * EltSize;
uint64_t NumElts;
- if (ArrayType *AT = dyn_cast<ArrayType>(C->getType()))
+ if (auto *AT = dyn_cast<ArrayType>(C->getType()))
NumElts = AT->getNumElements();
else
NumElts = C->getType()->getVectorNumElements();
@@ -393,7 +393,7 @@ bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
return true;
}
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
+ if (auto *CE = dyn_cast<ConstantExpr>(C)) {
if (CE->getOpcode() == Instruction::IntToPtr &&
CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
@@ -407,8 +407,8 @@ bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
const DataLayout &DL) {
- PointerType *PTy = cast<PointerType>(C->getType());
- IntegerType *IntType = dyn_cast<IntegerType>(LoadTy);
+ auto *PTy = cast<PointerType>(C->getType());
+ auto *IntType = dyn_cast<IntegerType>(LoadTy);
// If this isn't an integer load we can't fold it directly.
if (!IntType) {
@@ -446,7 +446,7 @@ Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
if (!IsConstantOffsetFromGlobal(C, GVal, Offset, DL))
return nullptr;
- GlobalVariable *GV = dyn_cast<GlobalVariable>(GVal);
+ auto *GV = dyn_cast<GlobalVariable>(GVal);
if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
!GV->getInitializer()->getType()->isSized())
return nullptr;
@@ -534,7 +534,7 @@ Constant *ConstantFoldLoadThroughBitcast(ConstantExpr *CE, Type *DestTy,
Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
const DataLayout &DL) {
// First, try the easy cases:
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
+ if (auto *GV = dyn_cast<GlobalVariable>(C))
if (GV->isConstant() && GV->hasDefinitiveInitializer())
return GV->getInitializer();
@@ -543,12 +543,12 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
return ConstantFoldLoadFromConstPtr(GA->getAliasee(), Ty, DL);
// If the loaded value isn't a constant expr, we can't handle it.
- ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
+ auto *CE = dyn_cast<ConstantExpr>(C);
if (!CE)
return nullptr;
if (CE->getOpcode() == Instruction::GetElementPtr) {
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
+ if (auto *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
if (Constant *V =
ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
@@ -597,8 +597,7 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
// If this load comes from anywhere in a constant global, and if the global
// is all undef or zero, we know what it loads.
- if (GlobalVariable *GV =
- dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, DL))) {
+ if (auto *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, DL))) {
if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
if (GV->getInitializer()->isNullValue())
return Constant::getNullValue(Ty);
@@ -616,7 +615,7 @@ namespace {
Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout &DL) {
if (LI->isVolatile()) return nullptr;
- if (Constant *C = dyn_cast<Constant>(LI->getOperand(0)))
+ if (auto *C = dyn_cast<Constant>(LI->getOperand(0)))
return ConstantFoldLoadFromConstPtr(C, LI->getType(), DL);
return nullptr;
@@ -705,7 +704,7 @@ Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
return nullptr;
Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ops[0], NewIdxs);
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
+ if (auto *CE = dyn_cast<ConstantExpr>(C)) {
if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
C = Folded;
}
@@ -716,9 +715,9 @@ Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
/// Strip the pointer casts, but preserve the address space information.
Constant* StripPtrCastKeepAS(Constant* Ptr, Type *&ElemTy) {
assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
- PointerType *OldPtrTy = cast<PointerType>(Ptr->getType());
+ auto *OldPtrTy = cast<PointerType>(Ptr->getType());
Ptr = Ptr->stripPointerCasts();
- PointerType *NewPtrTy = cast<PointerType>(Ptr->getType());
+ auto *NewPtrTy = cast<PointerType>(Ptr->getType());
ElemTy = NewPtrTy->getPointerElementType();
@@ -758,7 +757,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
// If this is "gep i8* Ptr, (sub 0, V)", fold this as:
// "inttoptr (sub (ptrtoint Ptr), V)"
if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
- ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[1]);
+ auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
assert((!CE || CE->getType() == IntPtrTy) &&
"CastGEPIndices didn't canonicalize index types!");
if (CE && CE->getOpcode() == Instruction::Sub &&
@@ -766,7 +765,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
Res = ConstantExpr::getSub(Res, CE->getOperand(1));
Res = ConstantExpr::getIntToPtr(Res, ResTy);
- if (ConstantExpr *ResCE = dyn_cast<ConstantExpr>(Res))
+ if (auto *ResCE = dyn_cast<ConstantExpr>(Res))
Res = ConstantFoldConstantExpression(ResCE, DL, TLI);
return Res;
}
@@ -783,13 +782,13 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);
// If this is a GEP of a GEP, fold it all into a single GEP.
- while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
+ while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end());
// Do not try to incorporate the sub-GEP if some index is not a number.
bool AllConstantInt = true;
- for (unsigned i = 0, e = NestedOps.size(); i != e; ++i)
- if (!isa<ConstantInt>(NestedOps[i])) {
+ for (Value *NestedOp : NestedOps)
+ if (!isa<ConstantInt>(NestedOp)) {
AllConstantInt = false;
break;
}
@@ -805,9 +804,9 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
// If the base value for this address is a literal integer value, fold the
// getelementptr to the resulting integer value casted to the pointer type.
APInt BasePtr(BitWidth, 0);
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
+ if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
if (CE->getOpcode() == Instruction::IntToPtr) {
- if (ConstantInt *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
+ if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
BasePtr = Base->getValue().zextOrTrunc(BitWidth);
}
}
@@ -859,7 +858,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
}
} else {
- StructType *STy = cast<StructType>(Ty);
+ auto *STy = cast<StructType>(Ty);
// If we end up with an offset that isn't valid for this struct type, we
// can't re-form this GEP in a regular form, so bail out. The pointer
// operand likely went through casts that are necessary to make the GEP
@@ -933,7 +932,7 @@ Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, Type *DestTy,
case Instruction::ICmp:
case Instruction::FCmp: llvm_unreachable("Invalid for compares");
case Instruction::Call:
- if (Function *F = dyn_cast<Function>(Ops.back()))
+ if (auto *F = dyn_cast<Function>(Ops.back()))
if (canConstantFoldCallTo(F))
return ConstantFoldCall(F, Ops.slice(0, Ops.size() - 1), TLI);
return nullptr;
@@ -957,7 +956,7 @@ Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, Type *DestTy,
Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
const TargetLibraryInfo *TLI) {
// Handle PHI nodes quickly here...
- if (PHINode *PN = dyn_cast<PHINode>(I)) {
+ if (auto *PN = dyn_cast<PHINode>(I)) {
Constant *CommonValue = nullptr;
for (Value *Incoming : PN->incoming_values()) {
@@ -968,11 +967,11 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
if (isa<UndefValue>(Incoming))
continue;
// If the incoming value is not a constant, then give up.
- Constant *C = dyn_cast<Constant>(Incoming);
+ auto *C = dyn_cast<Constant>(Incoming);
if (!C)
return nullptr;
// Fold the PHI's operands.
- if (ConstantExpr *NewC = dyn_cast<ConstantExpr>(C))
+ if (auto *NewC = dyn_cast<ConstantExpr>(C))
C = ConstantFoldConstantExpression(NewC, DL, TLI);
// If the incoming value is a different constant to
// the one we saw previously, then give up.
@@ -992,30 +991,30 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
return nullptr;
SmallVector<Constant *, 8> Ops;
- for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i) {
- Constant *Op = cast<Constant>(*i);
+ for (const Use &OpU : I->operands()) {
+ auto *Op = cast<Constant>(&OpU);
// Fold the Instruction's operands.
- if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(Op))
+ if (auto *NewCE = dyn_cast<ConstantExpr>(Op))
Op = ConstantFoldConstantExpression(NewCE, DL, TLI);
Ops.push_back(Op);
}
- if (const CmpInst *CI = dyn_cast<CmpInst>(I))
+ if (const auto *CI = dyn_cast<CmpInst>(I))
return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
DL, TLI);
- if (const LoadInst *LI = dyn_cast<LoadInst>(I))
+ if (const auto *LI = dyn_cast<LoadInst>(I))
return ConstantFoldLoadInst(LI, DL);
- if (InsertValueInst *IVI = dyn_cast<InsertValueInst>(I)) {
+ if (auto *IVI = dyn_cast<InsertValueInst>(I)) {
return ConstantExpr::getInsertValue(
cast<Constant>(IVI->getAggregateOperand()),
cast<Constant>(IVI->getInsertedValueOperand()),
IVI->getIndices());
}
- if (ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I)) {
+ if (auto *EVI = dyn_cast<ExtractValueInst>(I)) {
return ConstantExpr::getExtractValue(
cast<Constant>(EVI->getAggregateOperand()),
EVI->getIndices());
@@ -1031,12 +1030,11 @@ ConstantFoldConstantExpressionImpl(const ConstantExpr *CE, const DataLayout &DL,
const TargetLibraryInfo *TLI,
SmallPtrSetImpl<ConstantExpr *> &FoldedOps) {
SmallVector<Constant *, 8> Ops;
- for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end(); i != e;
- ++i) {
- Constant *NewC = cast<Constant>(*i);
+ for (const Use &NewU : CE->operands()) {
+ auto *NewC = cast<Constant>(&NewU);
// Recursively fold the ConstantExpr's operands. If we have already folded
// a ConstantExpr, we don't have to process it again.
- if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(NewC)) {
+ if (auto *NewCE = dyn_cast<ConstantExpr>(NewC)) {
if (FoldedOps.insert(NewCE).second)
NewC = ConstantFoldConstantExpressionImpl(NewCE, DL, TLI, FoldedOps);
}
@@ -1088,7 +1086,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
// FIXME: The following comment is out of date and the DataLayout is here now.
// ConstantExpr::getCompare cannot do this, because it doesn't have DL
// around to know if bit truncation is happening.
- if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
+ if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
if (Ops1->isNullValue()) {
if (CE0->getOpcode() == Instruction::IntToPtr) {
Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
@@ -1112,7 +1110,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
}
}
- if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
+ if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
if (CE0->getOpcode() == CE1->getOpcode()) {
if (CE0->getOpcode() == Instruction::IntToPtr) {
Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
@@ -1176,7 +1174,7 @@ Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
case Instruction::PtrToInt:
// If the input is an inttoptr, eliminate the pair. This requires knowing
// the width of a pointer, so it can't be done in ConstantExpr::getCast.
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
+ if (auto *CE = dyn_cast<ConstantExpr>(C)) {
if (CE->getOpcode() == Instruction::IntToPtr) {
Constant *Input = CE->getOperand(0);
unsigned InWidth = Input->getType()->getScalarSizeInBits();
@@ -1197,7 +1195,7 @@ Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
// the int size is >= the ptr size and the address spaces are the same.
// This requires knowing the width of a pointer, so it can't be done in
// ConstantExpr::getCast.
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
+ if (auto *CE = dyn_cast<ConstantExpr>(C)) {
if (CE->getOpcode() == Instruction::PtrToInt) {
Constant *SrcPtr = CE->getOperand(0);
unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
@@ -1243,12 +1241,13 @@ Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
return C;
}
-Constant *llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
- ArrayRef<Constant*> Indices) {
+Constant *
+llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
+ ArrayRef<Constant *> Indices) {
// Loop over all of the operands, tracking down which value we are
// addressing.
- for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
- C = C->getAggregateElement(Indices[i]);
+ for (Constant *Index : Indices) {
+ C = C->getAggregateElement(Index);
if (!C)
return nullptr;
}
@@ -1454,7 +1453,7 @@ Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID, Type *Ty,
if (IntrinsicID == Intrinsic::cos)
return Constant::getNullValue(Ty);
}
- if (ConstantFP *Op = dyn_cast<ConstantFP>(Operands[0])) {
+ if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
if (IntrinsicID == Intrinsic::convert_to_fp16) {
APFloat Val(Op->getValueAPF());
@@ -1625,7 +1624,7 @@ Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID, Type *Ty,
return nullptr;
}
- if (ConstantInt *Op = dyn_cast<ConstantInt>(Operands[0])) {
+ if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
switch (IntrinsicID) {
case Intrinsic::bswap:
return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
@@ -1655,7 +1654,7 @@ Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID, Type *Ty,
// Support ConstantVector in case we have an Undef in the top.
if (isa<ConstantVector>(Operands[0]) ||
isa<ConstantDataVector>(Operands[0])) {
- Constant *Op = cast<Constant>(Operands[0]);
+ auto *Op = cast<Constant>(Operands[0]);
switch (IntrinsicID) {
default: break;
case Intrinsic::x86_sse_cvtss2si:
@@ -1687,12 +1686,12 @@ Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID, Type *Ty,
}
if (Operands.size() == 2) {
- if (ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
+ if (auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
return nullptr;
double Op1V = getValueAsDouble(Op1);
- if (ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
+ if (auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
if (Op2->getType() != Op1->getType())
return nullptr;
@@ -1730,7 +1729,7 @@ Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID, Type *Ty,
if ((Name == "atan2" && TLI->has(LibFunc::atan2)) ||
(Name == "atan2f" && TLI->has(LibFunc::atan2f)))
return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
- } else if (ConstantInt *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
+ } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
return ConstantFP::get(Ty->getContext(),
APFloat((float)std::pow((float)Op1V,
@@ -1747,8 +1746,8 @@ Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID, Type *Ty,
return nullptr;
}
- if (ConstantInt *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
- if (ConstantInt *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
+ if (auto *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
+ if (auto *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
switch (IntrinsicID) {
default: break;
case Intrinsic::sadd_with_overflow:
@@ -1805,9 +1804,9 @@ Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID, Type *Ty,
if (Operands.size() != 3)
return nullptr;
- if (const ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
- if (const ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
- if (const ConstantFP *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
+ if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
+ if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
+ if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
switch (IntrinsicID) {
default: break;
case Intrinsic::fma:
@@ -1867,7 +1866,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
Type *Ty = F->getReturnType();
- if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ if (auto *VTy = dyn_cast<VectorType>(Ty))
return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands, TLI);
return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI);