86 #include "llvm/Config/llvm-config.h" 137 using namespace llvm;
139 #define DEBUG_TYPE "scalar-evolution" 142 "Number of trip counts computed with array length");
144 "Number of loops with predictable loop counts");
146 "Number of loops without predictable loop counts");
147 STATISTIC(NumBruteForceTripCountsComputed,
148 "Number of loops with trip counts computed by force");
152 cl::desc(
"Maximum number of iterations SCEV will " 153 "symbolically execute a constant " 160 cl::desc(
"Verify ScalarEvolution's backedge taken counts (slow)"));
163 cl::desc(
"Verify no dangling value in ScalarEvolution's " 164 "ExprValueMap (slow)"));
168 cl::desc(
"Verify IR correctness when making sensitive SCEV queries (slow)"),
173 cl::desc(
"Threshold for inlining multiplication operands into a SCEV"),
178 cl::desc(
"Threshold for inlining addition operands into a SCEV"),
182 "scalar-evolution-max-scev-compare-depth",
cl::Hidden,
183 cl::desc(
"Maximum depth of recursive SCEV complexity comparisons"),
187 "scalar-evolution-max-scev-operations-implication-depth",
cl::Hidden,
188 cl::desc(
"Maximum depth of recursive SCEV operations implication analysis"),
192 "scalar-evolution-max-value-compare-depth",
cl::Hidden,
193 cl::desc(
"Maximum depth of recursive value complexity comparisons"),
198 cl::desc(
"Maximum depth of recursive arithmetics"),
202 "scalar-evolution-max-constant-evolving-depth",
cl::Hidden,
207 cl::desc(
"Maximum depth of recursive SExt/ZExt"),
212 cl::desc(
"Max coefficients in AddRec during evolving"),
223 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 233 cast<SCEVConstant>(
this)->getValue()->printAsOperand(OS,
false);
238 OS <<
"(trunc " << *Op->getType() <<
" " << *Op <<
" to " 245 OS <<
"(zext " << *Op->getType() <<
" " << *Op <<
" to " 252 OS <<
"(sext " << *Op->getType() <<
" " << *Op <<
" to " 278 const char *OpStr =
nullptr;
289 if (std::next(I) !=
E)
305 OS <<
"(" << *UDiv->
getLHS() <<
" /u " << *UDiv->
getRHS() <<
")";
312 OS <<
"sizeof(" << *AllocTy <<
")";
316 OS <<
"alignof(" << *AllocTy <<
")";
323 OS <<
"offsetof(" << *CTy <<
", ";
334 OS <<
"***COULDNOTCOMPUTE***";
343 return cast<SCEVConstant>(
this)->
getType();
347 return cast<SCEVCastExpr>(
this)->
getType();
352 return cast<SCEVNAryExpr>(
this)->
getType();
354 return cast<SCEVAddExpr>(
this)->
getType();
356 return cast<SCEVUDivExpr>(
this)->
getType();
358 return cast<SCEVUnknown>(
this)->
getType();
367 return SC->getValue()->isZero();
373 return SC->getValue()->isOne();
379 return SC->getValue()->isMinusOne();
385 if (!Mul)
return false;
389 if (!SC)
return false;
407 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
return S;
409 UniqueSCEVs.InsertNode(S, IP);
419 IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
425 :
SCEV(ID, SCEVTy),
Op(op), Ty(ty) {}
431 "Cannot truncate non-integer value!");
438 "Cannot zero extend non-integer value!");
445 "Cannot sign extend non-integer value!");
448 void SCEVUnknown::deleted() {
450 SE->forgetMemoizedResults(
this);
453 SE->UniqueSCEVs.RemoveNode(
this);
459 void SCEVUnknown::allUsesReplacedWith(
Value *New) {
461 SE->UniqueSCEVs.RemoveNode(
this);
470 if (
ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
471 if (VCE->getOpcode() == Instruction::PtrToInt)
472 if (
ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
473 if (CE->getOpcode() == Instruction::GetElementPtr &&
474 CE->getOperand(0)->isNullValue() &&
475 CE->getNumOperands() == 2)
476 if (
ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
478 AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
487 if (
ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
488 if (VCE->getOpcode() == Instruction::PtrToInt)
489 if (
ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
490 if (CE->getOpcode() == Instruction::GetElementPtr &&
491 CE->getOperand(0)->isNullValue()) {
493 cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
494 if (
StructType *STy = dyn_cast<StructType>(Ty))
495 if (!STy->isPacked() &&
496 CE->getNumOperands() == 3 &&
497 CE->getOperand(1)->isNullValue()) {
498 if (
ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
500 STy->getNumElements() == 2 &&
501 STy->getElementType(0)->isIntegerTy(1)) {
502 AllocTy = STy->getElementType(1);
512 if (
ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
513 if (VCE->getOpcode() == Instruction::PtrToInt)
514 if (
ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
515 if (CE->getOpcode() == Instruction::GetElementPtr &&
516 CE->getNumOperands() == 3 &&
517 CE->getOperand(0)->isNullValue() &&
518 CE->getOperand(1)->isNullValue()) {
520 cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
525 FieldNo = CE->getOperand(2);
566 if (LIsPointer != RIsPointer)
567 return (
int)LIsPointer - (int)RIsPointer;
572 return (
int)LID - (int)RID;
575 if (
const auto *LA = dyn_cast<Argument>(LV)) {
576 const auto *
RA = cast<Argument>(RV);
577 unsigned LArgNo = LA->getArgNo(), RArgNo =
RA->getArgNo();
578 return (
int)LArgNo - (int)RArgNo;
581 if (
const auto *LGV = dyn_cast<GlobalValue>(LV)) {
582 const auto *RGV = cast<GlobalValue>(RV);
584 const auto IsGVNameSemantic = [&](
const GlobalValue *GV) {
585 auto LT = GV->getLinkage();
592 if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
593 return LGV->getName().compare(RGV->getName());
598 if (
const auto *LInst = dyn_cast<Instruction>(LV)) {
599 const auto *RInst = cast<Instruction>(RV);
604 if (LParent != RParent) {
607 if (LDepth != RDepth)
608 return (
int)LDepth - (int)RDepth;
612 unsigned LNumOps = LInst->getNumOperands(),
613 RNumOps = RInst->getNumOperands();
614 if (LNumOps != RNumOps)
615 return (
int)LNumOps - (int)RNumOps;
617 for (
unsigned Idx :
seq(0u, LNumOps)) {
620 RInst->getOperand(Idx), Depth + 1);
645 return (
int)LType - (int)RType;
652 switch (static_cast<SCEVTypes>(LType)) {
658 RU->getValue(),
Depth + 1);
670 const APInt &
RA = RC->getAPInt();
671 unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.
getBitWidth();
672 if (LBitWidth != RBitWidth)
673 return (
int)LBitWidth - (int)RBitWidth;
674 return LA.ult(RA) ? -1 : 1;
684 const Loop *LLoop = LA->
getLoop(), *RLoop = RA->getLoop();
685 if (LLoop != RLoop) {
686 const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
687 assert(LHead != RHead &&
"Two loops share the same header?");
692 "No dominance between recurrences used by one SCEV?");
697 unsigned LNumOps = LA->
getNumOperands(), RNumOps = RA->getNumOperands();
698 if (LNumOps != RNumOps)
699 return (
int)LNumOps - (int)RNumOps;
702 for (
unsigned i = 0; i != LNumOps; ++i) {
721 unsigned LNumOps = LC->
getNumOperands(), RNumOps = RC->getNumOperands();
722 if (LNumOps != RNumOps)
723 return (
int)LNumOps - (int)RNumOps;
725 for (
unsigned i = 0; i != LNumOps; ++i) {
742 RC->getLHS(), DT,
Depth + 1);
746 RC->getRHS(), DT,
Depth + 1);
784 if (Ops.
size() < 2)
return;
788 if (Ops.
size() == 2) {
791 const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
798 std::stable_sort(Ops.
begin(), Ops.
end(),
799 [&](
const SCEV *LHS,
const SCEV *RHS) {
808 for (
unsigned i = 0, e = Ops.
size(); i != e-2; ++i) {
809 const SCEV *S = Ops[i];
814 for (
unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
819 if (i == e-2)
return;
827 struct FindSCEVSize {
830 FindSCEVSize() =
default;
832 bool follow(
const SCEV *S) {
838 bool isDone()
const {
851 struct SCEVDivision :
public SCEVVisitor<SCEVDivision, void> {
856 const SCEV *Denominator,
const SCEV **Quotient,
857 const SCEV **Remainder) {
858 assert(Numerator && Denominator &&
"Uninitialized SCEV");
860 SCEVDivision
D(SE, Numerator, Denominator);
864 if (Numerator == Denominator) {
870 if (Numerator->
isZero()) {
877 if (Denominator->
isOne()) {
878 *Quotient = Numerator;
884 if (
const SCEVMulExpr *
T = dyn_cast<SCEVMulExpr>(Denominator)) {
886 *Quotient = Numerator;
887 for (
const SCEV *
Op :
T->operands()) {
888 divide(SE, *Quotient,
Op, &Q, &R);
895 *Remainder = Numerator;
904 *Quotient = D.Quotient;
905 *Remainder = D.Remainder;
920 if (
const SCEVConstant *
D = dyn_cast<SCEVConstant>(Denominator)) {
922 APInt DenominatorVal =
D->getAPInt();
926 if (NumeratorBW > DenominatorBW)
927 DenominatorVal = DenominatorVal.
sext(NumeratorBW);
928 else if (NumeratorBW < DenominatorBW)
929 NumeratorVal = NumeratorVal.
sext(DenominatorBW);
933 APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal);
941 const SCEV *StartQ, *StartR, *StepQ, *StepR;
943 return cannotDivide(Numerator);
944 divide(SE, Numerator->
getStart(), Denominator, &StartQ, &StartR);
948 if (Ty != StartQ->getType() || Ty != StartR->
getType() ||
949 Ty != StepQ->getType() || Ty != StepR->
getType())
950 return cannotDivide(Numerator);
963 divide(SE,
Op, Denominator, &Q, &R);
967 return cannotDivide(Numerator);
973 if (Qs.
size() == 1) {
987 bool FoundDenominatorTerm =
false;
991 return cannotDivide(Numerator);
993 if (FoundDenominatorTerm) {
1000 divide(SE,
Op, Denominator, &Q, &R);
1008 return cannotDivide(Numerator);
1010 FoundDenominatorTerm =
true;
1014 if (FoundDenominatorTerm) {
1023 if (!isa<SCEVUnknown>(Denominator))
1024 return cannotDivide(Numerator);
1028 RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
1029 cast<SCEVConstant>(Zero)->getValue();
1032 if (Remainder->
isZero()) {
1034 RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
1035 cast<SCEVConstant>(One)->getValue();
1046 return cannotDivide(Numerator);
1047 divide(SE, Diff, Denominator, &Q, &R);
1049 return cannotDivide(Numerator);
1055 const SCEV *Denominator)
1056 : SE(S), Denominator(Denominator) {
1063 cannotDivide(Numerator);
1068 void cannotDivide(
const SCEV *Numerator) {
1070 Remainder = Numerator;
1074 const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
1151 APInt OddFactorial(W, 1);
1153 for (
unsigned i = 3; i <= K; ++i) {
1158 OddFactorial *=
Mult;
1162 unsigned CalculationBits = W +
T;
1171 APInt MultiplyFactor = OddFactorial.
zext(W+1);
1173 MultiplyFactor = MultiplyFactor.
trunc(W);
1179 for (
unsigned i = 1; i != K; ++i) {
1204 const SCEV *Result = getStart();
1205 for (
unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1210 if (isa<SCEVCouldNotCompute>(Coeff))
1224 assert(getTypeSizeInBits(Op->
getType()) > getTypeSizeInBits(Ty) &&
1225 "This is not a truncating conversion!");
1227 "This is not a conversion to a SCEVable type!");
1228 Ty = getEffectiveSCEVType(Ty);
1235 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
return S;
1244 return getTruncateExpr(
ST->getOperand(),
Ty);
1248 return getTruncateOrSignExtend(SS->getOperand(),
Ty);
1252 return getTruncateOrZeroExtend(SZ->getOperand(),
Ty);
1258 if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(
Op)) {
1259 auto *CommOp = cast<SCEVCommutativeExpr>(
Op);
1261 unsigned numTruncs = 0;
1262 for (
unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
1264 const SCEV *S = getTruncateExpr(CommOp->getOperand(i),
Ty);
1265 if (!isa<SCEVCastExpr>(CommOp->getOperand(i)) && isa<SCEVTruncateExpr>(S))
1267 Operands.push_back(S);
1269 if (numTruncs < 2) {
1270 if (isa<SCEVAddExpr>(Op))
1271 return getAddExpr(Operands);
1272 else if (isa<SCEVMulExpr>(Op))
1273 return getMulExpr(Operands);
1280 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
1285 if (
const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
1287 for (
const SCEV *Op : AddRec->operands())
1288 Operands.
push_back(getTruncateExpr(Op, Ty));
1297 UniqueSCEVs.InsertNode(S, IP);
1298 addToLoopUseLists(S);
1337 struct ExtendOpTraitsBase {
1343 template <
typename ExtendOp>
struct ExtendOpTraits {
1356 struct ExtendOpTraits<SCEVSignExtendExpr> :
public ExtendOpTraitsBase {
1359 static const GetExtendExprTy GetExtendExpr;
1361 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1368 const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1372 struct ExtendOpTraits<SCEVZeroExtendExpr> :
public ExtendOpTraitsBase {
1375 static const GetExtendExprTy GetExtendExpr;
1377 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1384 const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1396 template <
typename ExtendOpTy>
1399 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1400 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1426 auto PreStartFlags =
1444 const SCEV *OperandExtendedStart =
1445 SE->
getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
1446 (SE->*GetExtendExpr)(Step, WideTy, Depth));
1447 if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
1459 const SCEV *OverflowLimit =
1460 ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);
1462 if (OverflowLimit &&
1470 template <
typename ExtendOpTy>
1474 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1476 const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR,
Ty, SE,
Depth);
1478 return (SE->*GetExtendExpr)(AR->getStart(),
Ty,
Depth);
1480 return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE),
Ty,
1482 (SE->*GetExtendExpr)(PreStart,
Ty,
Depth));
1517 template <
typename ExtendOpTy>
1518 bool ScalarEvolution::proveNoWrapByVaryingStart(
const SCEV *Start,
1521 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1533 for (
unsigned Delta : {-2, -1, 1, 2}) {
1543 static_cast<SCEVAddRecExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
1547 if (PreAR && PreAR->getNoWrapFlags(WrapType)) {
1550 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
1551 DeltaS, &Pred,
this);
1552 if (Limit && isKnownPredicate(Pred, PreAR, Limit))
1577 return TZ < BitWidth ? C.
trunc(TZ).
zext(BitWidth) :
C;
1579 return APInt(BitWidth, 0);
1587 const APInt &ConstantStart,
1589 const unsigned BitWidth = ConstantStart.
getBitWidth();
1592 return TZ < BitWidth ? ConstantStart.
trunc(TZ).
zext(BitWidth)
1594 return APInt(BitWidth, 0);
1599 assert(getTypeSizeInBits(Op->
getType()) < getTypeSizeInBits(Ty) &&
1600 "This is not an extending conversion!");
1602 "This is not a conversion to a SCEVable type!");
1603 Ty = getEffectiveSCEVType(Ty);
1612 return getZeroExtendExpr(SZ->getOperand(),
Ty, Depth + 1);
1621 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
return S;
1625 UniqueSCEVs.InsertNode(S, IP);
1626 addToLoopUseLists(S);
1634 const SCEV *
X =
ST->getOperand();
1636 unsigned TruncBits = getTypeSizeInBits(
ST->getType());
1637 unsigned NewBits = getTypeSizeInBits(Ty);
1640 return getTruncateOrZeroExtend(X, Ty);
1648 if (AR->isAffine()) {
1649 const SCEV *Start = AR->getStart();
1650 const SCEV *Step = AR->getStepRecurrence(*
this);
1651 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1652 const Loop *L = AR->getLoop();
1654 if (!AR->hasNoUnsignedWrap()) {
1655 auto NewFlags = proveNoWrapViaConstantRanges(AR);
1661 if (AR->hasNoUnsignedWrap())
1662 return getAddRecExpr(
1663 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this, Depth + 1),
1664 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
1674 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
1675 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1681 const SCEV *CastedMaxBECount =
1682 getTruncateOrZeroExtend(MaxBECount, Start->
getType());
1683 const SCEV *RecastedMaxBECount =
1684 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->
getType());
1685 if (MaxBECount == RecastedMaxBECount) {
1688 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
1690 const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
1694 const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
1695 const SCEV *WideMaxBECount =
1696 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
1697 const SCEV *OperandExtendedAdd =
1698 getAddExpr(WideStart,
1699 getMulExpr(WideMaxBECount,
1700 getZeroExtendExpr(Step, WideTy, Depth + 1),
1703 if (ZAdd == OperandExtendedAdd) {
1707 return getAddRecExpr(
1708 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
1710 getZeroExtendExpr(Step, Ty, Depth + 1), L,
1711 AR->getNoWrapFlags());
1715 OperandExtendedAdd =
1716 getAddExpr(WideStart,
1717 getMulExpr(WideMaxBECount,
1718 getSignExtendExpr(Step, WideTy, Depth + 1),
1721 if (ZAdd == OperandExtendedAdd) {
1726 return getAddRecExpr(
1727 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
1729 getSignExtendExpr(Step, Ty, Depth + 1), L,
1730 AR->getNoWrapFlags());
1742 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
1743 !AC.assumptions().empty()) {
1751 getUnsignedRangeMax(Step));
1758 return getAddRecExpr(
1759 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
1761 getZeroExtendExpr(Step, Ty, Depth + 1), L,
1762 AR->getNoWrapFlags());
1766 getSignedRangeMin(Step));
1774 return getAddRecExpr(
1775 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
1777 getSignExtendExpr(Step, Ty, Depth + 1), L,
1778 AR->getNoWrapFlags());
1786 if (
const auto *
SC = dyn_cast<SCEVConstant>(Start)) {
1791 const SCEV *SResidual =
1792 getAddRecExpr(
getConstant(C - D), Step, L, AR->getNoWrapFlags());
1793 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
1794 return getAddExpr(SZExtD, SZExtR,
1800 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1802 return getAddRecExpr(
1803 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this, Depth + 1),
1804 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
1812 if (matchURem(Op, LHS, RHS))
1813 return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
1814 getZeroExtendExpr(RHS, Ty, Depth + 1));
1818 if (
auto *Div = dyn_cast<SCEVUDivExpr>(Op))
1819 return getUDivExpr(getZeroExtendExpr(Div->getLHS(),
Ty, Depth + 1),
1820 getZeroExtendExpr(Div->getRHS(),
Ty, Depth + 1));
1822 if (
auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
1824 if (SA->hasNoUnsignedWrap()) {
1828 for (
const auto *Op : SA->operands())
1829 Ops.
push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
1841 if (
const auto *
SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
1845 const SCEV *SResidual =
1847 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
1848 return getAddExpr(SZExtD, SZExtR,
1855 if (
auto *SM = dyn_cast<SCEVMulExpr>(Op)) {
1857 if (SM->hasNoUnsignedWrap()) {
1861 for (
const auto *Op : SM->operands())
1862 Ops.
push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
1878 if (SM->getNumOperands() == 2)
1879 if (
auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0)))
1880 if (MulLHS->getAPInt().isPowerOf2())
1881 if (
auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) {
1882 int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) -
1883 MulLHS->getAPInt().logBase2();
1886 getZeroExtendExpr(MulLHS, Ty),
1888 getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty),
1895 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
return S;
1898 UniqueSCEVs.InsertNode(S, IP);
1899 addToLoopUseLists(S);
1905 assert(getTypeSizeInBits(Op->
getType()) < getTypeSizeInBits(Ty) &&
1906 "This is not an extending conversion!");
1908 "This is not a conversion to a SCEVable type!");
1909 Ty = getEffectiveSCEVType(Ty);
1918 return getSignExtendExpr(SS->getOperand(),
Ty, Depth + 1);
1922 return getZeroExtendExpr(SZ->getOperand(),
Ty, Depth + 1);
1931 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
return S;
1936 UniqueSCEVs.InsertNode(S, IP);
1937 addToLoopUseLists(S);
1945 const SCEV *
X =
ST->getOperand();
1947 unsigned TruncBits = getTypeSizeInBits(
ST->getType());
1948 unsigned NewBits = getTypeSizeInBits(Ty);
1951 return getTruncateOrSignExtend(X, Ty);
1954 if (
auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
1956 if (SA->hasNoSignedWrap()) {
1960 for (
const auto *Op : SA->operands())
1961 Ops.
push_back(getSignExtendExpr(Op, Ty, Depth + 1));
1974 if (
const auto *
SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
1978 const SCEV *SResidual =
1980 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
1981 return getAddExpr(SSExtD, SSExtR,
1992 if (AR->isAffine()) {
1993 const SCEV *Start = AR->getStart();
1994 const SCEV *Step = AR->getStepRecurrence(*
this);
1995 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1996 const Loop *L = AR->getLoop();
1998 if (!AR->hasNoSignedWrap()) {
1999 auto NewFlags = proveNoWrapViaConstantRanges(AR);
2005 if (AR->hasNoSignedWrap())
2006 return getAddRecExpr(
2007 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this, Depth + 1),
2018 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
2019 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
2025 const SCEV *CastedMaxBECount =
2026 getTruncateOrZeroExtend(MaxBECount, Start->
getType());
2027 const SCEV *RecastedMaxBECount =
2028 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->
getType());
2029 if (MaxBECount == RecastedMaxBECount) {
2032 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
2034 const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
2038 const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
2039 const SCEV *WideMaxBECount =
2040 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
2041 const SCEV *OperandExtendedAdd =
2042 getAddExpr(WideStart,
2043 getMulExpr(WideMaxBECount,
2044 getSignExtendExpr(Step, WideTy, Depth + 1),
2047 if (SAdd == OperandExtendedAdd) {
2051 return getAddRecExpr(
2052 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
2054 getSignExtendExpr(Step, Ty, Depth + 1), L,
2055 AR->getNoWrapFlags());
2059 OperandExtendedAdd =
2060 getAddExpr(WideStart,
2061 getMulExpr(WideMaxBECount,
2062 getZeroExtendExpr(Step, WideTy, Depth + 1),
2065 if (SAdd == OperandExtendedAdd) {
2077 return getAddRecExpr(
2078 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
2080 getZeroExtendExpr(Step, Ty, Depth + 1), L,
2081 AR->getNoWrapFlags());
2094 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
2095 !AC.assumptions().empty()) {
2102 const SCEV *OverflowLimit =
2104 if (OverflowLimit &&
2105 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
2106 isKnownOnEveryIteration(Pred, AR, OverflowLimit))) {
2109 return getAddRecExpr(
2110 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this, Depth + 1),
2111 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
2118 if (
const auto *
SC = dyn_cast<SCEVConstant>(Start)) {
2123 const SCEV *SResidual =
2124 getAddRecExpr(
getConstant(C - D), Step, L, AR->getNoWrapFlags());
2125 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
2126 return getAddExpr(SSExtD, SSExtR,
2132 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2134 return getAddRecExpr(
2135 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this, Depth + 1),
2136 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
2143 return getZeroExtendExpr(Op, Ty, Depth + 1);
2147 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
return S;
2150 UniqueSCEVs.InsertNode(S, IP);
2151 addToLoopUseLists(S);
2159 assert(getTypeSizeInBits(Op->
getType()) < getTypeSizeInBits(Ty) &&
2160 "This is not an extending conversion!");
2162 "This is not a conversion to a SCEVable type!");
2163 Ty = getEffectiveSCEVType(Ty);
2167 if (
SC->getAPInt().isNegative())
2168 return getSignExtendExpr(Op, Ty);
2172 const SCEV *NewOp =
T->getOperand();
2173 if (getTypeSizeInBits(NewOp->
getType()) < getTypeSizeInBits(Ty))
2174 return getAnyExtendExpr(NewOp, Ty);
2175 return getTruncateOrNoop(NewOp, Ty);
2179 const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
2180 if (!isa<SCEVZeroExtendExpr>(ZExt))
2184 const SCEV *SExt = getSignExtendExpr(Op, Ty);
2185 if (!isa<SCEVSignExtendExpr>(SExt))
2191 for (
const SCEV *Op : AR->operands())
2192 Ops.
push_back(getAnyExtendExpr(Op, Ty));
2193 return getAddRecExpr(Ops, AR->getLoop(),
SCEV::FlagNW);
2197 if (isa<SCEVSMaxExpr>(Op))
2230 APInt &AccumulatedConstant,
2231 const SCEV *
const *Ops,
size_t NumOperands,
2234 bool Interesting =
false;
2238 while (
const SCEVConstant *
C = dyn_cast<SCEVConstant>(Ops[i])) {
2241 if (Scale != 1 || AccumulatedConstant != 0 ||
C->getValue()->isZero())
2243 AccumulatedConstant += Scale *
C->getAPInt();
2248 for (; i != NumOperands; ++i) {
2250 if (Mul && isa<SCEVConstant>(Mul->
getOperand(0))) {
2252 Scale * cast<SCEVConstant>(Mul->
getOperand(0))->getAPInt();
2269 Pair.first->second += NewScale;
2277 std::pair<DenseMap<const SCEV *, APInt>::iterator,
bool> Pair =
2278 M.
insert({Ops[i], Scale});
2282 Pair.first->second += Scale;
2300 using namespace std::placeholders;
2307 assert(CanAnalyze &&
"don't call from other places!");
2314 auto IsKnownNonNegative = [&](
const SCEV *S) {
2324 if (SignOrUnsignWrap != SignOrUnsignMask &&
2326 isa<SCEVConstant>(Ops[0])) {
2333 return Instruction::Mul;
2339 const APInt &
C = cast<SCEVConstant>(Ops[0])->getAPInt();
2344 Opcode, C, OBO::NoSignedWrap);
2352 Opcode, C, OBO::NoUnsignedWrap);
2370 "only nuw or nsw allowed");
2372 if (Ops.
size() == 1)
return Ops[0];
2374 Type *ETy = getEffectiveSCEVType(Ops[0]->
getType());
2375 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i)
2377 "SCEVAddExpr operand types don't match!");
2387 if (
const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2390 while (
const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2392 Ops[0] =
getConstant(LHSC->getAPInt() + RHSC->getAPInt());
2393 if (Ops.
size() == 2)
return Ops[0];
2395 LHSC = cast<SCEVConstant>(Ops[0]);
2399 if (LHSC->getValue()->isZero()) {
2404 if (Ops.
size() == 1)
return Ops[0];
2409 return getOrCreateAddExpr(Ops, Flags);
2414 Type *
Ty = Ops[0]->getType();
2415 bool FoundMatch =
false;
2416 for (
unsigned i = 0, e = Ops.
size(); i != e-1; ++i)
2417 if (Ops[i] == Ops[i+1]) {
2420 while (i+Count != e && Ops[i+Count] == Ops[i])
2425 if (Ops.
size() == Count)
2429 --i; e -= Count - 1;
2433 return getAddExpr(Ops, Flags, Depth + 1);
2439 auto FindTruncSrcType = [&]() ->
Type * {
2444 if (
auto *
T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
2445 return T->getOperand()->getType();
2446 if (
const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2447 const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
2448 if (
const auto *
T = dyn_cast<SCEVTruncateExpr>(LastOp))
2449 return T->getOperand()->getType();
2453 if (
auto *SrcType = FindTruncSrcType()) {
2458 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i) {
2460 if (
T->getOperand()->getType() != SrcType) {
2465 }
else if (
const SCEVConstant *
C = dyn_cast<SCEVConstant>(Ops[i])) {
2466 LargeOps.
push_back(getAnyExtendExpr(
C, SrcType));
2467 }
else if (
const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
2469 for (
unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2471 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
2472 if (
T->getOperand()->getType() != SrcType) {
2477 }
else if (
const auto *
C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
2478 LargeMulOps.
push_back(getAnyExtendExpr(
C, SrcType));
2495 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
2496 return getTruncateExpr(Fold, Ty);
2501 while (Idx < Ops.
size() && Ops[Idx]->getSCEVType() <
scAddExpr)
2505 if (Idx < Ops.
size()) {
2506 bool DeletedAdd =
false;
2507 while (
const SCEVAddExpr *
Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
2526 while (Idx < Ops.
size() && Ops[Idx]->getSCEVType() <
scMulExpr)
2531 if (Idx < Ops.
size() && isa<SCEVMulExpr>(Ops[Idx])) {
2532 uint64_t BitWidth = getTypeSizeInBits(Ty);
2535 APInt AccumulatedConstant(BitWidth, 0);
2538 APInt(BitWidth, 1), *
this)) {
2539 struct APIntCompare {
2540 bool operator()(
const APInt &LHS,
const APInt &RHS)
const {
2541 return LHS.
ult(RHS);
2548 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2549 for (
const SCEV *NewOp : NewOps)
2550 MulOpLists[M.
find(NewOp)->second].push_back(NewOp);
2553 if (AccumulatedConstant != 0)
2555 for (
auto &MulOp : MulOpLists)
2556 if (MulOp.first != 0)
2563 if (Ops.
size() == 1)
2572 for (; Idx < Ops.
size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
2573 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
2574 for (
unsigned MulOp = 0, e = Mul->
getNumOperands(); MulOp != e; ++MulOp) {
2576 if (isa<SCEVConstant>(MulOpSCEV))
2578 for (
unsigned AddOp = 0, e = Ops.
size(); AddOp != e; ++AddOp)
2579 if (MulOpSCEV == Ops[AddOp]) {
2592 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
2594 if (Ops.
size() == 2)
return OuterMul;
2607 for (
unsigned OtherMulIdx = Idx+1;
2608 OtherMulIdx < Ops.
size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
2610 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
2614 OMulOp != e; ++OMulOp)
2615 if (OtherMul->
getOperand(OMulOp) == MulOpSCEV) {
2632 const SCEV *InnerMulSum =
2634 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
2636 if (Ops.
size() == 2)
return OuterMul;
2653 for (; Idx < Ops.
size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
2659 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i)
2660 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
2667 if (!LIOps.
empty()) {
2676 AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1);
2682 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
2685 if (Ops.
size() == 1)
return NewRec;
2688 for (
unsigned i = 0;; ++i)
2689 if (Ops[i] == AddRec) {
2699 for (
unsigned OtherIdx = Idx+1;
2700 OtherIdx < Ops.
size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2705 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
2707 "AddRecExprs are not sorted in reverse dominance order?");
2708 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
2712 for (; OtherIdx != Ops.
size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2714 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2715 if (OtherAddRec->getLoop() == AddRecLoop) {
2716 for (
unsigned i = 0, e = OtherAddRec->getNumOperands();
2718 if (i >= AddRecOps.size()) {
2719 AddRecOps.
append(OtherAddRec->op_begin()+i,
2720 OtherAddRec->op_end());
2724 AddRecOps[i], OtherAddRec->getOperand(i)};
2727 Ops.
erase(Ops.
begin() + OtherIdx); --OtherIdx;
2742 return getOrCreateAddExpr(Ops, Flags);
2750 for (
const SCEV *
Op : Ops)
2754 static_cast<SCEVAddExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2756 const SCEV **
O = SCEVAllocator.Allocate<
const SCEV *>(Ops.size());
2757 std::uninitialized_copy(Ops.begin(), Ops.end(),
O);
2758 S =
new (SCEVAllocator)
2760 UniqueSCEVs.InsertNode(S, IP);
2761 addToLoopUseLists(S);
2772 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i)
2777 static_cast<SCEVAddRecExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2779 const SCEV **
O = SCEVAllocator.Allocate<
const SCEV *>(Ops.
size());
2780 std::uninitialized_copy(Ops.
begin(), Ops.
end(),
O);
2781 S =
new (SCEVAllocator)
2783 UniqueSCEVs.InsertNode(S, IP);
2784 addToLoopUseLists(S);
2795 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i)
2799 static_cast<SCEVMulExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2801 const SCEV **
O = SCEVAllocator.Allocate<
const SCEV *>(Ops.
size());
2802 std::uninitialized_copy(Ops.
begin(), Ops.
end(),
O);
2805 UniqueSCEVs.InsertNode(S, IP);
2806 addToLoopUseLists(S);
2812 static uint64_t
umul_ov(uint64_t i, uint64_t j,
bool &Overflow) {
2814 if (j > 1 && k / j != i) Overflow =
true;
2821 static uint64_t
Choose(uint64_t n, uint64_t k,
bool &Overflow) {
2830 if (n == 0 || n == k)
return 1;
2831 if (k > n)
return 0;
2837 for (uint64_t i = 1; i <= k; ++i) {
2838 r =
umul_ov(r, n-(i-1), Overflow);
2847 struct FindConstantInAddMulChain {
2848 bool FoundConstant =
false;
2850 bool follow(
const SCEV *S) {
2851 FoundConstant |= isa<SCEVConstant>(S);
2852 return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
2855 bool isDone()
const {
2856 return FoundConstant;
2860 FindConstantInAddMulChain
F;
2863 return F.FoundConstant;
2871 "only nuw or nsw allowed");
2873 if (Ops.
size() == 1)
return Ops[0];
2875 Type *ETy = getEffectiveSCEVType(Ops[0]->
getType());
2876 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i)
2878 "SCEVMulExpr operand types don't match!");
2888 return getOrCreateMulExpr(Ops, Flags);
2892 if (
const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2894 if (Ops.
size() == 2)
2904 return getAddExpr(getMulExpr(LHSC,
Add->getOperand(0),
2906 getMulExpr(LHSC,
Add->getOperand(1),
2911 while (
const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2916 Ops.erase(Ops.begin()+1);
2917 if (Ops.size() == 1)
return Ops[0];
2918 LHSC = cast<SCEVConstant>(Ops[0]);
2922 if (cast<SCEVConstant>(Ops[0])->getValue()->isOne()) {
2925 }
else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
2931 if (Ops.
size() == 2) {
2934 bool AnyFolded =
false;
2935 for (
const SCEV *AddOp :
Add->operands()) {
2938 if (!isa<SCEVMulExpr>(Mul)) AnyFolded =
true;
2943 }
else if (
const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
2946 for (
const SCEV *AddRecOp : AddRec->operands())
2950 return getAddRecExpr(Operands, AddRec->getLoop(),
2956 if (Ops.
size() == 1)
2961 while (Idx < Ops.
size() && Ops[Idx]->getSCEVType() <
scMulExpr)
2965 if (Idx < Ops.
size()) {
2966 bool DeletedMul =
false;
2967 while (
const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2973 Ops.
append(Mul->op_begin(), Mul->op_end());
2991 for (; Idx < Ops.
size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
2997 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i)
2998 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
3005 if (!LIOps.
empty()) {
3020 const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);
3023 if (Ops.
size() == 1)
return NewRec;
3026 for (
unsigned i = 0;; ++i)
3027 if (Ops[i] == AddRec) {
3048 bool OpsModified =
false;
3049 for (
unsigned OtherIdx = Idx+1;
3050 OtherIdx != Ops.
size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
3054 if (!OtherAddRec || OtherAddRec->
getLoop() != AddRecLoop)
3063 bool Overflow =
false;
3065 bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
3070 for (
int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3071 uint64_t Coeff1 =
Choose(x, 2*x - y, Overflow);
3074 z < ze && !Overflow; ++z) {
3075 uint64_t Coeff2 =
Choose(2*x - y, x-z, Overflow);
3077 if (LargerThan64Bits)
3078 Coeff =
umul_ov(Coeff1, Coeff2, Overflow);
3080 Coeff = Coeff1*Coeff2;
3084 SumOps.
push_back(getMulExpr(CoeffTerm, Term1, Term2,
3093 const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->
getLoop(),
3095 if (Ops.
size() == 2)
return NewAddRec;
3096 Ops[Idx] = NewAddRec;
3097 Ops.
erase(Ops.
begin() + OtherIdx); --OtherIdx;
3113 return getOrCreateMulExpr(Ops, Flags);
3120 getEffectiveSCEVType(RHS->
getType()) &&
3121 "SCEVURemExpr operand types don't match!");
3124 if (
const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3126 if (RHSC->getValue()->isOne())
3127 return getZero(LHS->
getType());
3130 if (RHSC->getAPInt().isPowerOf2()) {
3134 return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
3139 const SCEV *UDiv = getUDivExpr(LHS, RHS);
3149 getEffectiveSCEVType(RHS->
getType()) &&
3150 "SCEVUDivExpr operand types don't match!");
3152 if (
const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3153 if (RHSC->getValue()->isOne())
3158 if (!RHSC->getValue()->isZero()) {
3163 unsigned LZ = RHSC->getAPInt().countLeadingZeros();
3164 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
3167 if (!RHSC->getAPInt().isPowerOf2())
3173 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*
this))) {
3175 const APInt &StepInt = Step->getAPInt();
3176 const APInt &DivInt = RHSC->getAPInt();
3177 if (!StepInt.
urem(DivInt) &&
3178 getZeroExtendExpr(AR, ExtTy) ==
3179 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3180 getZeroExtendExpr(Step, ExtTy),
3183 for (
const SCEV *
Op : AR->operands())
3185 return getAddRecExpr(Operands, AR->getLoop(),
SCEV::FlagNW);
3191 if (StartC && !DivInt.
urem(StepInt) &&
3192 getZeroExtendExpr(AR, ExtTy) ==
3193 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3194 getZeroExtendExpr(Step, ExtTy),
3196 const APInt &StartInt = StartC->getAPInt();
3197 const APInt &StartRem = StartInt.
urem(StepInt);
3199 LHS = getAddRecExpr(
getConstant(StartInt - StartRem), Step,
3204 if (
const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
3206 for (
const SCEV *
Op : M->operands())
3208 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
3210 for (
unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
3211 const SCEV *
Op = M->getOperand(i);
3212 const SCEV *Div = getUDivExpr(Op, RHSC);
3213 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
3217 return getMulExpr(Operands);
3223 if (
const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) {
3224 if (
auto *DivisorConstant =
3225 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) {
3226 bool Overflow =
false;
3228 DivisorConstant->getAPInt().
umul_ov(RHSC->getAPInt(), Overflow);
3232 return getUDivExpr(OtherDiv->getLHS(),
getConstant(NewRHS));
3237 if (
const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
3239 for (
const SCEV *
Op : A->operands())
3241 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
3243 for (
unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
3244 const SCEV *
Op = getUDivExpr(A->getOperand(i), RHS);
3245 if (isa<SCEVUDivExpr>(Op) ||
3246 getMulExpr(Op, RHS) != A->getOperand(i))
3250 if (Operands.
size() == A->getNumOperands())
3251 return getAddExpr(Operands);
3256 if (
const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
3257 Constant *LHSCV = LHSC->getValue();
3258 Constant *RHSCV = RHSC->getValue();
3270 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
return S;
3273 UniqueSCEVs.InsertNode(S, IP);
3274 addToLoopUseLists(S);
3304 return getUDivExpr(LHS, RHS);
3306 if (
const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
3309 if (
const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->
getOperand(0))) {
3310 if (LHSCst == RHSCst) {
3313 return getMulExpr(Operands);
3319 APInt Factor =
gcd(LHSCst, RHSCst);
3322 cast<SCEVConstant>(
getConstant(LHSCst->getAPInt().udiv(Factor)));
3324 cast<SCEVConstant>(
getConstant(RHSCst->getAPInt().udiv(Factor)));
3328 LHS = getMulExpr(Operands);
3332 return getUDivExactExpr(LHS, RHS);
3342 return getMulExpr(Operands);
3346 return getUDivExpr(LHS, RHS);
3356 if (
const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
3357 if (StepChrec->getLoop() == L) {
3358 Operands.
append(StepChrec->op_begin(), StepChrec->op_end());
3359 return getAddRecExpr(Operands, L, maskFlags(Flags,
SCEV::FlagNW));
3363 return getAddRecExpr(Operands, L, Flags);
3371 if (Operands.
size() == 1)
return Operands[0];
3373 Type *ETy = getEffectiveSCEVType(Operands[0]->
getType());
3374 for (
unsigned i = 1, e = Operands.
size(); i != e; ++i)
3375 assert(getEffectiveSCEVType(Operands[i]->
getType()) == ETy &&
3376 "SCEVAddRecExpr operand types don't match!");
3377 for (
unsigned i = 0, e = Operands.
size(); i != e; ++i)
3379 "SCEVAddRecExpr operand is not loop-invariant!");
3396 if (
const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
3397 const Loop *NestedLoop = NestedAR->getLoop();
3403 NestedAR->op_end());
3404 Operands[0] = NestedAR->getStart();
3408 bool AllInvariant =
all_of(
3417 maskFlags(Flags,
SCEV::FlagNW | NestedAR->getNoWrapFlags());
3419 NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
3420 AllInvariant =
all_of(NestedOperands, [&](
const SCEV *Op) {
3430 maskFlags(NestedAR->getNoWrapFlags(),
SCEV::FlagNW | Flags);
3431 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3435 Operands[0] = NestedAR;
3441 return getOrCreateAddRecExpr(Operands, L, Flags);
3450 Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->
getType());
3459 const SCEV *TotalOffset = getZero(IntPtrTy);
3463 for (
const SCEV *IndexExpr : IndexExprs) {
3465 if (
StructType *STy = dyn_cast<StructType>(CurTy)) {
3469 const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo);
3472 TotalOffset = getAddExpr(TotalOffset, FieldOffset);
3475 CurTy = STy->getTypeAtIndex(Index);
3478 CurTy = cast<SequentialType>(CurTy)->getElementType();
3480 const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy);
3482 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy);
3485 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap);
3488 TotalOffset = getAddExpr(TotalOffset, LocalOffset);
3493 return getAddExpr(BaseExpr, TotalOffset, Wrap);
3499 return getSMaxExpr(Ops);
3505 if (Ops.
size() == 1)
return Ops[0];
3507 Type *ETy = getEffectiveSCEVType(Ops[0]->
getType());
3508 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i)
3510 "SCEVSMaxExpr operand types don't match!");
3518 if (
const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3521 while (
const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
3524 getContext(),
APIntOps::smax(LHSC->getAPInt(), RHSC->getAPInt()));
3527 if (Ops.
size() == 1)
return Ops[0];
3528 LHSC = cast<SCEVConstant>(Ops[0]);
3532 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(
true)) {
3535 }
else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(
true)) {
3541 if (Ops.
size() == 1)
return Ops[0];
3550 if (Idx < Ops.
size()) {
3551 bool DeletedSMax =
false;
3552 while (
const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
3554 Ops.
append(SMax->op_begin(), SMax->op_end());
3559 return getSMaxExpr(Ops);
3565 for (
unsigned i = 0, e = Ops.
size()-1; i != e; ++i)
3568 if (Ops[i] == Ops[i+1] ||
3577 if (Ops.
size() == 1)
return Ops[0];
3579 assert(!Ops.
empty() &&
"Reduced smax down to nothing!");
3585 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i)
3586 ID.AddPointer(Ops[i]);
3588 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
return S;
3589 const SCEV **
O = SCEVAllocator.Allocate<
const SCEV *>(Ops.
size());
3590 std::uninitialized_copy(Ops.
begin(), Ops.
end(),
O);
3593 UniqueSCEVs.InsertNode(S, IP);
3594 addToLoopUseLists(S);
3601 return getUMaxExpr(Ops);
3607 if (Ops.
size() == 1)
return Ops[0];
3609 Type *ETy = getEffectiveSCEVType(Ops[0]->
getType());
3610 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i)
3612 "SCEVUMaxExpr operand types don't match!");
3620 if (
const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3623 while (
const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
3626 getContext(),
APIntOps::umax(LHSC->getAPInt(), RHSC->getAPInt()));
3629 if (Ops.
size() == 1)
return Ops[0];
3630 LHSC = cast<SCEVConstant>(Ops[0]);
3634 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(
false)) {
3637 }
else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(
false)) {
3643 if (Ops.
size() == 1)
return Ops[0];
3652 if (Idx < Ops.
size()) {
3653 bool DeletedUMax =
false;
3654 while (
const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
3656 Ops.
append(UMax->op_begin(), UMax->op_end());
3661 return getUMaxExpr(Ops);
3667 for (
unsigned i = 0, e = Ops.
size()-1; i != e; ++i)
3670 if (Ops[i] == Ops[i + 1] || isKnownViaNonRecursiveReasoning(
3680 if (Ops.
size() == 1)
return Ops[0];
3682 assert(!Ops.
empty() &&
"Reduced umax down to nothing!");
3688 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i)
3689 ID.AddPointer(Ops[i]);
3691 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
return S;
3692 const SCEV **
O = SCEVAllocator.Allocate<
const SCEV *>(Ops.
size());
3693 std::uninitialized_copy(Ops.
begin(), Ops.
end(),
O);
3696 UniqueSCEVs.InsertNode(S, IP);
3697 addToLoopUseLists(S);
3704 return getSMinExpr(Ops);
3712 return getNotSCEV(getSMaxExpr(NotOps));
3718 return getUMinExpr(Ops);
3722 assert(!Ops.
empty() &&
"At least one operand must be!");
3724 if (Ops.
size() == 1)
3731 return getNotSCEV(getUMaxExpr(NotOps));
3738 return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
3748 IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
3761 if (
SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
3762 assert(cast<SCEVUnknown>(S)->getValue() == V &&
3763 "Stale SCEVUnknown in uniquing map!");
3768 FirstUnknown = cast<SCEVUnknown>(S);
3769 UniqueSCEVs.InsertNode(S, IP);
3789 assert(isSCEVable(Ty) &&
"Type is not SCEVable!");
3791 return getDataLayout().getIndexTypeSizeInBits(Ty);
3792 return getDataLayout().getTypeSizeInBits(Ty);
3799 assert(isSCEVable(Ty) &&
"Type is not SCEVable!");
3806 return getDataLayout().getIntPtrType(Ty);
3810 return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ?
T1 : T2;
3814 return CouldNotCompute.get();
3817 bool ScalarEvolution::checkValidity(
const SCEV *S)
const {
3820 return SU && SU->
getValue() ==
nullptr;
3823 return !ContainsNulls;
3828 if (I != HasRecMap.end())
3832 HasRecMap.insert({S, FoundAddRec});
3842 return {S,
nullptr};
3844 if (
Add->getNumOperands() != 2)
3845 return {S,
nullptr};
3849 return {S,
nullptr};
3851 return {
Add->getOperand(1), ConstOp->getValue()};
3857 ScalarEvolution::getSCEVValues(
const SCEV *S) {
3858 ExprValueMapType::iterator
SI = ExprValueMap.find_as(S);
3859 if (SI == ExprValueMap.end())
3864 for (
const auto &VE : SI->second)
3865 assert(ValueExprMap.count(VE.first));
3876 if (I != ValueExprMap.end()) {
3877 const SCEV *S = I->second;
3880 SV->remove({V, nullptr});
3883 const SCEV *Stripped;
3886 if (Offset !=
nullptr) {
3888 SV->remove({V, Offset});
3890 ValueExprMap.erase(V);
3898 if (
auto *
I = dyn_cast<Instruction>(V)) {
3899 if (isa<OverflowingBinaryOperator>(
I)) {
3900 if (
auto *NS = dyn_cast<SCEVNAryExpr>(S)) {
3901 if (
I->hasNoSignedWrap() && !NS->hasNoSignedWrap())
3903 if (
I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap())
3906 }
else if (isa<PossiblyExactOperator>(
I) &&
I->isExact())
3915 assert(isSCEVable(V->
getType()) &&
"Value is not SCEVable!");
3917 const SCEV *S = getExistingSCEV(V);
3923 std::pair<ValueExprMapType::iterator, bool> Pair =
3924 ValueExprMap.insert({SCEVCallbackVH(V,
this), S});
3926 ExprValueMap[S].insert({V,
nullptr});
3930 const SCEV *Stripped = S;
3938 if (Offset !=
nullptr && !isa<SCEVUnknown>(Stripped) &&
3939 !isa<GetElementPtrInst>(V))
3940 ExprValueMap[Stripped].insert({V, Offset});
3946 const SCEV *ScalarEvolution::getExistingSCEV(
Value *V) {
3947 assert(isSCEVable(V->
getType()) &&
"Value is not SCEVable!");
3950 if (I != ValueExprMap.end()) {
3951 const SCEV *S = I->second;
3952 if (checkValidity(S))
3954 eraseValueFromMap(V);
3955 forgetMemoizedResults(S);
3968 Ty = getEffectiveSCEVType(Ty);
3980 Ty = getEffectiveSCEVType(Ty);
3981 const SCEV *AllOnes =
3983 return getMinusSCEV(AllOnes, V);
3991 return getZero(LHS->
getType());
3996 const bool RHSIsNotMinSigned =
3997 !getSignedRangeMin(RHS).isMinSignedValue();
4022 return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
4029 "Cannot truncate or zero extend with non-integer arguments!");
4030 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4032 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4033 return getTruncateExpr(V, Ty);
4034 return getZeroExtendExpr(V, Ty);
4042 "Cannot truncate or zero extend with non-integer arguments!");
4043 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4045 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4046 return getTruncateExpr(V, Ty);
4047 return getSignExtendExpr(V, Ty);
4054 "Cannot noop or zero extend with non-integer arguments!");
4055 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4056 "getNoopOrZeroExtend cannot truncate!");
4057 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4059 return getZeroExtendExpr(V, Ty);
4066 "Cannot noop or sign extend with non-integer arguments!");
4067 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4068 "getNoopOrSignExtend cannot truncate!");
4069 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4071 return getSignExtendExpr(V, Ty);
4078 "Cannot noop or any extend with non-integer arguments!");
4079 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4080 "getNoopOrAnyExtend cannot truncate!");
4081 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4083 return getAnyExtendExpr(V, Ty);
4090 "Cannot truncate or noop with non-integer arguments!");
4091 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
4092 "getTruncateOrNoop cannot extend!");
4093 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4095 return getTruncateExpr(V, Ty);
4100 const SCEV *PromotedLHS = LHS;
4101 const SCEV *PromotedRHS = RHS;
4103 if (getTypeSizeInBits(LHS->
getType()) > getTypeSizeInBits(RHS->
getType()))
4104 PromotedRHS = getZeroExtendExpr(RHS, LHS->
getType());
4106 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->
getType());
4108 return getUMaxExpr(PromotedLHS, PromotedRHS);
4114 return getUMinFromMismatchedTypes(Ops);
4119 assert(!Ops.
empty() &&
"At least one operand must be!");
4121 if (Ops.
size() == 1)
4125 Type *MaxType =
nullptr;
4135 PromotedOps.
push_back(getNoopOrZeroExtend(S, MaxType));
4138 return getUMinExpr(PromotedOps);
4146 if (
const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
4147 return getPointerBase(Cast->getOperand());
4148 }
else if (
const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
4149 const SCEV *PtrOp =
nullptr;
4150 for (
const SCEV *NAryOp : NAry->operands()) {
4151 if (NAryOp->getType()->isPointerTy()) {
4160 return getPointerBase(PtrOp);
4171 Worklist.
push_back(cast<Instruction>(U));
4174 void ScalarEvolution::forgetSymbolicName(
Instruction *PN,
const SCEV *SymName) {
4180 while (!Worklist.
empty()) {
4182 if (!Visited.
insert(I).second)
4185 auto It = ValueExprMap.find_as(static_cast<Value *>(I));
4186 if (It != ValueExprMap.end()) {
4187 const SCEV *Old = It->second;
4191 if (Old != SymName && !hasOperand(Old, SymName))
4201 if (!isa<PHINode>(I) ||
4202 !isa<SCEVUnknown>(Old) ||
4203 (I != PN && Old == SymName)) {
4204 eraseValueFromMap(It->first);
4205 forgetMemoizedResults(Old);
4223 bool IgnoreOtherLoops =
true) {
4225 const SCEV *Result = Rewriter.visit(S);
4226 if (Rewriter.hasSeenLoopVariantSCEVUnknown())
4228 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
4235 SeenLoopVariantSCEVUnknown =
true;
4243 SeenOtherLoops =
true;
4247 bool hasSeenLoopVariantSCEVUnknown() {
return SeenLoopVariantSCEVUnknown; }
4249 bool hasSeenOtherLoops() {
return SeenOtherLoops; }
4256 bool SeenLoopVariantSCEVUnknown =
false;
4257 bool SeenOtherLoops =
false;
4267 SCEVPostIncRewriter
Rewriter(L, SE);
4268 const SCEV *Result = Rewriter.visit(S);
4269 return Rewriter.hasSeenLoopVariantSCEVUnknown()
4276 SeenLoopVariantSCEVUnknown =
true;
4284 SeenOtherLoops =
true;
4288 bool hasSeenLoopVariantSCEVUnknown() {
return SeenLoopVariantSCEVUnknown; }
4290 bool hasSeenOtherLoops() {
return SeenOtherLoops; }
4297 bool SeenLoopVariantSCEVUnknown =
false;
4298 bool SeenOtherLoops =
false;
4304 class SCEVBackedgeConditionFolder
4307 static const SCEV *rewrite(
const SCEV *S,
const Loop *L,
4309 bool IsPosBECond =
false;
4310 Value *BECond =
nullptr;
4315 "Both outgoing branches should not target same header!");
4322 SCEVBackedgeConditionFolder
Rewriter(L, BECond, IsPosBECond, SE);
4323 return Rewriter.visit(S);
4327 const SCEV *Result = Expr;
4337 if (Res.hasValue()) {
4338 bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne();
4355 explicit SCEVBackedgeConditionFolder(
const Loop *L,
Value *BECond,
4358 IsPositiveBECond(IsPosBECond) {}
4364 Value *BackedgeCond =
nullptr;
4366 bool IsPositiveBECond;
4370 SCEVBackedgeConditionFolder::compareWithBackedgeCondition(
Value *IC) {
4375 if (BackedgeCond == IC)
4383 static const SCEV *rewrite(
const SCEV *S,
const Loop *L,
4386 const SCEV *Result = Rewriter.visit(S);
4404 bool isValid() {
return Valid; }
4417 ScalarEvolution::proveNoWrapViaConstantRanges(
const SCEVAddRecExpr *AR) {
4431 if (NSWRegion.contains(AddRecRange))
4441 if (NUWRegion.contains(AddRecRange))
4467 if (
auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
4468 IsNSW = OBO->hasNoSignedWrap();
4469 IsNUW = OBO->hasNoUnsignedWrap();
4473 explicit BinaryOp(
unsigned Opcode,
Value *LHS,
Value *RHS,
bool IsNSW =
false,
4475 : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
4490 switch (
Op->getOpcode()) {
4492 case Instruction::Sub:
4493 case Instruction::Mul:
4494 case Instruction::UDiv:
4495 case Instruction::URem:
4496 case Instruction::And:
4497 case Instruction::Or:
4498 case Instruction::AShr:
4499 case Instruction::Shl:
4500 return BinaryOp(
Op);
4502 case Instruction::Xor:
4503 if (
auto *RHSC = dyn_cast<ConstantInt>(
Op->getOperand(1)))
4506 if (RHSC->getValue().isSignMask())
4508 return BinaryOp(
Op);
4510 case Instruction::LShr:
4512 if (
ConstantInt *SA = dyn_cast<ConstantInt>(
Op->getOperand(1))) {
4519 if (SA->getValue().ult(BitWidth)) {
4523 return BinaryOp(Instruction::UDiv,
Op->getOperand(0),
X);
4526 return BinaryOp(
Op);
4528 case Instruction::ExtractValue: {
4529 auto *EVI = cast<ExtractValueInst>(
Op);
4530 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
4537 if (
auto *
F = CI->getCalledFunction())
4538 switch (
F->getIntrinsicID()) {
4543 CI->getArgOperand(1));
4550 CI->getArgOperand(1),
true,
4554 CI->getArgOperand(1),
false,
4559 return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
4560 CI->getArgOperand(1));
4564 return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
4565 CI->getArgOperand(1),
true,
4568 return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
4569 CI->getArgOperand(1),
false,
4573 return BinaryOp(Instruction::Mul, CI->getArgOperand(0),
4574 CI->getArgOperand(1));
4611 if (Op == SymbolicPHI)
4616 if (SourceBits != NewBits)
4625 : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand());
4629 if (X != SymbolicPHI)
4631 Signed = SExt !=
nullptr;
4698 ScalarEvolution::createAddRecFromPHIWithCastsImpl(
const SCEVUnknown *SymbolicPHI) {
4704 auto *PN = cast<PHINode>(SymbolicPHI->
getValue());
4706 assert(L &&
"Expecting an integer loop header phi");
4711 Value *BEValueV =
nullptr, *StartValueV =
nullptr;
4712 for (
unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
4713 Value *V = PN->getIncomingValue(i);
4714 if (L->contains(PN->getIncomingBlock(i))) {
4717 }
else if (BEValueV != V) {
4721 }
else if (!StartValueV) {
4723 }
else if (StartValueV != V) {
4724 StartValueV =
nullptr;
4728 if (!BEValueV || !StartValueV)
4731 const SCEV *BEValue = getSCEV(BEValueV);
4742 unsigned FoundIndex =
Add->getNumOperands();
4743 Type *TruncTy =
nullptr;
4745 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
4748 if (FoundIndex == e) {
4753 if (FoundIndex ==
Add->getNumOperands())
4758 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
4759 if (i != FoundIndex)
4761 const SCEV *Accum = getAddExpr(Ops);
4819 const SCEV *StartVal = getSCEV(StartValueV);
4820 const SCEV *PHISCEV =
4821 getAddRecExpr(getTruncateExpr(StartVal, TruncTy),
4830 if (
const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
4834 const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags);
4847 auto getExtendedExpr = [&](
const SCEV *Expr,
4848 bool CreateSignExtend) ->
const SCEV * {
4850 const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy);
4851 const SCEV *ExtendedExpr =
4852 CreateSignExtend ? getSignExtendExpr(TruncatedExpr, Expr->
getType())
4853 : getZeroExtendExpr(TruncatedExpr, Expr->
getType());
4854 return ExtendedExpr;
4862 auto PredIsKnownFalse = [&](
const SCEV *Expr,
4863 const SCEV *ExtendedExpr) ->
bool {
4864 return Expr != ExtendedExpr &&
4868 const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
4869 if (PredIsKnownFalse(StartVal, StartExtended)) {
4876 const SCEV *AccumExtended = getExtendedExpr(Accum,
true);
4877 if (PredIsKnownFalse(Accum, AccumExtended)) {
4882 auto AppendPredicate = [&](
const SCEV *Expr,
4883 const SCEV *ExtendedExpr) ->
void {
4884 if (Expr != ExtendedExpr &&
4886 const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
4892 AppendPredicate(StartVal, StartExtended);
4893 AppendPredicate(Accum, AccumExtended);
4901 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
4902 std::make_pair(NewAR, Predicates);
4904 PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
4910 auto *PN = cast<PHINode>(SymbolicPHI->
getValue());
4916 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
4917 if (
I != PredicatedSCEVRewrites.end()) {
4918 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
4921 if (Rewrite.first == SymbolicPHI)
4925 assert(isa<SCEVAddRecExpr>(Rewrite.first) &&
"Expected an AddRec");
4926 assert(!(Rewrite.second).empty() &&
"Expected to find Predicates");
4931 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
4936 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
4954 auto areExprsEqual = [&](
const SCEV *Expr1,
const SCEV *Expr2) ->
bool {
4955 if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) &&
4956 !Preds.implies(SE.getEqualPredicate(Expr2, Expr1)))
4973 const SCEV *ScalarEvolution::createSimpleAffineAddRec(
PHINode *PN,
4975 Value *StartValueV) {
4978 assert(BEValueV && StartValueV);
4987 const SCEV *Accum =
nullptr;
4989 Accum = getSCEV(BO->RHS);
4991 Accum = getSCEV(BO->LHS);
5002 const SCEV *StartVal = getSCEV(StartValueV);
5003 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
5005 ValueExprMap[SCEVCallbackVH(PN,
this)] = PHISCEV;
5010 if (
auto *BEInst = dyn_cast<Instruction>(BEValueV))
5012 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
5017 const SCEV *ScalarEvolution::createAddRecFromPHI(
PHINode *PN) {
5025 Value *BEValueV =
nullptr, *StartValueV =
nullptr;
5031 }
else if (BEValueV != V) {
5035 }
else if (!StartValueV) {
5037 }
else if (StartValueV != V) {
5038 StartValueV =
nullptr;
5042 if (!BEValueV || !StartValueV)
5045 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
5046 "PHI node already processed?");
5050 if (
auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
5054 const SCEV *SymbolicName = getUnknown(PN);
5055 ValueExprMap.insert({SCEVCallbackVH(PN,
this), SymbolicName});
5059 const SCEV *BEValue = getSCEV(BEValueV);
5069 unsigned FoundIndex =
Add->getNumOperands();
5070 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5071 if (
Add->getOperand(i) == SymbolicName)
5072 if (FoundIndex == e) {
5077 if (FoundIndex !=
Add->getNumOperands()) {
5080 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5081 if (i != FoundIndex)
5082 Ops.
push_back(SCEVBackedgeConditionFolder::rewrite(
Add->getOperand(i),
5084 const SCEV *Accum = getAddExpr(Ops);
5089 (isa<SCEVAddRecExpr>(Accum) &&
5090 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
5107 if (
GEP->isInBounds() &&
GEP->getOperand(0) == PN) {
5110 const SCEV *Ptr = getSCEV(
GEP->getPointerOperand());
5120 const SCEV *StartVal = getSCEV(StartValueV);
5121 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
5126 forgetSymbolicName(PN, SymbolicName);
5127 ValueExprMap[SCEVCallbackVH(PN,
this)] = PHISCEV;
5132 if (
auto *BEInst = dyn_cast<Instruction>(BEValueV))
5134 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
5149 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *
this);
5150 const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *
this,
false);
5151 if (Shifted != getCouldNotCompute() &&
5152 Start != getCouldNotCompute()) {
5153 const SCEV *StartVal = getSCEV(StartValueV);
5154 if (Start == StartVal) {
5158 forgetSymbolicName(PN, SymbolicName);
5159 ValueExprMap[SCEVCallbackVH(PN,
this)] = Shifted;
5169 eraseValueFromMap(PN);
5178 struct CheckAvailable {
5179 bool TraversalDone =
false;
5180 bool Available =
true;
5182 const Loop *L =
nullptr;
5187 : L(L), BB(BB), DT(DT) {}
5189 bool setUnavailable() {
5190 TraversalDone =
true;
5195 bool follow(
const SCEV *S) {
5208 const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
5209 if (L && (ARLoop == L || ARLoop->
contains(L)))
5212 return setUnavailable();
5217 const auto *SU = cast<SCEVUnknown>(S);
5218 Value *V = SU->getValue();
5220 if (isa<Argument>(V))
5223 if (isa<Instruction>(V) && DT.
dominates(cast<Instruction>(V), BB))
5226 return setUnavailable();
5232 return setUnavailable();
5237 bool isDone() {
return TraversalDone; }
5240 CheckAvailable CA(L, BB, DT);
5244 return CA.Available;
5257 if (!LeftEdge.isSingleEdge())
5260 assert(RightEdge.isSingleEdge() &&
"Follows from LeftEdge.isSingleEdge()");
5280 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(
PHINode *PN) {
5282 [&](
BasicBlock *BB) {
return DT.isReachableFromEntry(BB); };
5304 assert(IDom &&
"At least the entry block should dominate PN");
5307 Value *Cond =
nullptr, *LHS =
nullptr, *RHS =
nullptr;
5309 if (BI && BI->isConditional() &&
5313 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS);
5319 const SCEV *ScalarEvolution::createNodeForPHI(
PHINode *PN) {
5320 if (
const SCEV *S = createAddRecFromPHI(PN))
5323 if (
const SCEV *S = createNodeFromSelectLikePHI(PN))
5331 if (LI.replacementPreservesLCSSAForm(PN, V))
5335 return getUnknown(PN);
5344 if (
auto *CI = dyn_cast<ConstantInt>(Cond))
5345 return getSCEV(CI->isOne() ? TrueVal : FalseVal);
5350 return getUnknown(I);
5352 Value *LHS = ICI->getOperand(0);
5353 Value *RHS = ICI->getOperand(1);
5355 switch (ICI->getPredicate()) {
5364 if (getTypeSizeInBits(LHS->
getType()) <= getTypeSizeInBits(I->
getType())) {
5365 const SCEV *
LS = getNoopOrSignExtend(getSCEV(LHS), I->
getType());
5366 const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->
getType());
5367 const SCEV *LA = getSCEV(TrueVal);
5368 const SCEV *
RA = getSCEV(FalseVal);
5369 const SCEV *LDiff = getMinusSCEV(LA, LS);
5370 const SCEV *RDiff = getMinusSCEV(RA, RS);
5372 return getAddExpr(getSMaxExpr(LS, RS), LDiff);
5373 LDiff = getMinusSCEV(LA, RS);
5374 RDiff = getMinusSCEV(RA, LS);
5376 return getAddExpr(getSMinExpr(LS, RS), LDiff);
5387 if (getTypeSizeInBits(LHS->
getType()) <= getTypeSizeInBits(I->
getType())) {
5388 const SCEV *
LS = getNoopOrZeroExtend(getSCEV(LHS), I->
getType());
5389 const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->
getType());
5390 const SCEV *LA = getSCEV(TrueVal);
5391 const SCEV *
RA = getSCEV(FalseVal);
5392 const SCEV *LDiff = getMinusSCEV(LA, LS);
5393 const SCEV *RDiff = getMinusSCEV(RA, RS);
5395 return getAddExpr(getUMaxExpr(LS, RS), LDiff);
5396 LDiff = getMinusSCEV(LA, RS);
5397 RDiff = getMinusSCEV(RA, LS);
5399 return getAddExpr(getUMinExpr(LS, RS), LDiff);
5404 if (getTypeSizeInBits(LHS->
getType()) <= getTypeSizeInBits(I->
getType()) &&
5405 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->
isZero()) {
5407 const SCEV *
LS = getNoopOrZeroExtend(getSCEV(LHS), I->
getType());
5408 const SCEV *LA = getSCEV(TrueVal);
5409 const SCEV *
RA = getSCEV(FalseVal);
5410 const SCEV *LDiff = getMinusSCEV(LA, LS);
5411 const SCEV *RDiff = getMinusSCEV(RA, One);
5413 return getAddExpr(getUMaxExpr(One, LS), LDiff);
5418 if (getTypeSizeInBits(LHS->
getType()) <= getTypeSizeInBits(I->
getType()) &&
5419 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->
isZero()) {
5421 const SCEV *
LS = getNoopOrZeroExtend(getSCEV(LHS), I->
getType());
5422 const SCEV *LA = getSCEV(TrueVal);
5423 const SCEV *
RA = getSCEV(FalseVal);
5424 const SCEV *LDiff = getMinusSCEV(LA, One);
5425 const SCEV *RDiff = getMinusSCEV(RA, LS);
5427 return getAddExpr(getUMaxExpr(One, LS), LDiff);
5434 return getUnknown(I);
5442 return getUnknown(GEP);
5447 return getGEPExpr(GEP, IndexExprs);
5450 uint32_t ScalarEvolution::GetMinTrailingZerosImpl(
const SCEV *S) {
5452 return C->getAPInt().countTrailingZeros();
5455 return std::min(GetMinTrailingZeros(
T->getOperand()),
5456 (
uint32_t)getTypeSizeInBits(
T->getType()));
5459 uint32_t OpRes = GetMinTrailingZeros(
E->getOperand());
5460 return OpRes == getTypeSizeInBits(
E->getOperand()->getType())
5461 ? getTypeSizeInBits(
E->getType())
5466 uint32_t OpRes = GetMinTrailingZeros(
E->getOperand());
5467 return OpRes == getTypeSizeInBits(
E->getOperand()->getType())
5468 ? getTypeSizeInBits(
E->getType())
5472 if (
const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
5474 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
5475 for (
unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
5476 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
5480 if (
const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
5482 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
5483 uint32_t BitWidth = getTypeSizeInBits(M->getType());
5484 for (
unsigned i = 1, e = M->getNumOperands();
5485 SumOpRes != BitWidth && i != e; ++i)
5487 std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth);
5493 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
5494 for (
unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
5495 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
5499 if (
const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
5501 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
5502 for (
unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
5503 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
5507 if (
const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
5509 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
5510 for (
unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
5511 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
5515 if (
const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
5526 auto I = MinTrailingZerosCache.find(S);
5527 if (I != MinTrailingZerosCache.end())
5530 uint32_t Result = GetMinTrailingZerosImpl(S);
5531 auto InsertPair = MinTrailingZerosCache.insert({S, Result});
5532 assert(InsertPair.second &&
"Should insert a new key");
5533 return InsertPair.first->second;
5549 ScalarEvolution::getRangeRef(
const SCEV *S,
5550 ScalarEvolution::RangeSignHint SignHint) {
5552 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
5557 if (I != Cache.
end())
5563 unsigned BitWidth = getTypeSizeInBits(S->
getType());
5568 uint32_t TZ = GetMinTrailingZeros(S);
5570 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED)
5571 ConservativeResult =
5582 for (
unsigned i = 1, e =
Add->getNumOperands(); i != e; ++i)
5583 X = X.
add(getRangeRef(
Add->getOperand(i), SignHint));
5584 return setRange(
Add, SignHint, ConservativeResult.intersectWith(X));
5587 if (
const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
5589 for (
unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
5590 X = X.
multiply(getRangeRef(Mul->getOperand(i), SignHint));
5591 return setRange(Mul, SignHint, ConservativeResult.intersectWith(X));
5594 if (
const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
5596 for (
unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
5597 X = X.
smax(getRangeRef(SMax->getOperand(i), SignHint));
5598 return setRange(SMax, SignHint, ConservativeResult.intersectWith(X));
5601 if (
const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
5603 for (
unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
5604 X = X.
umax(getRangeRef(UMax->getOperand(i), SignHint));
5605 return setRange(UMax, SignHint, ConservativeResult.intersectWith(X));
5608 if (
const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
5611 return setRange(UDiv, SignHint,
5612 ConservativeResult.intersectWith(X.
udiv(Y)));
5617 return setRange(ZExt, SignHint,
5618 ConservativeResult.intersectWith(X.
zeroExtend(BitWidth)));
5623 return setRange(SExt, SignHint,
5624 ConservativeResult.intersectWith(X.
signExtend(BitWidth)));
5629 return setRange(Trunc, SignHint,
5630 ConservativeResult.intersectWith(X.
truncate(BitWidth)));
5633 if (
const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
5636 if (AddRec->hasNoUnsignedWrap())
5637 if (
const SCEVConstant *
C = dyn_cast<SCEVConstant>(AddRec->getStart()))
5638 if (!
C->getValue()->isZero())
5639 ConservativeResult = ConservativeResult.intersectWith(
5644 if (AddRec->hasNoSignedWrap()) {
5645 bool AllNonNeg =
true;
5646 bool AllNonPos =
true;
5647 for (
unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
5649 if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos =
false;
5652 ConservativeResult = ConservativeResult.intersectWith(
5656 ConservativeResult = ConservativeResult.intersectWith(
5658 APInt(BitWidth, 1)));
5662 if (AddRec->isAffine()) {
5663 const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
5664 if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
5665 getTypeSizeInBits(MaxBECount->
getType()) <= BitWidth) {
5666 auto RangeFromAffine = getRangeForAffineAR(
5667 AddRec->getStart(), AddRec->getStepRecurrence(*
this), MaxBECount,
5669 if (!RangeFromAffine.isFullSet())
5670 ConservativeResult =
5671 ConservativeResult.intersectWith(RangeFromAffine);
5673 auto RangeFromFactoring = getRangeViaFactoring(
5674 AddRec->getStart(), AddRec->getStepRecurrence(*
this), MaxBECount,
5676 if (!RangeFromFactoring.isFullSet())
5677 ConservativeResult =
5678 ConservativeResult.intersectWith(RangeFromFactoring);
5682 return setRange(AddRec, SignHint, std::move(ConservativeResult));
5685 if (
const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
5689 ConservativeResult = ConservativeResult.intersectWith(MDRange.
getValue());
5695 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
5698 if (Known.
One != ~Known.
Zero + 1)
5699 ConservativeResult =
5703 assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
5704 "generalize as needed!");
5707 ConservativeResult = ConservativeResult.intersectWith(
5713 if (
const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) {
5715 if (PendingPhiRanges.insert(Phi).second) {
5717 for (
auto &
Op : Phi->operands()) {
5718 auto OpRange = getRangeRef(getSCEV(
Op), SignHint);
5719 RangeFromOps = RangeFromOps.
unionWith(OpRange);
5724 ConservativeResult = ConservativeResult.
intersectWith(RangeFromOps);
5725 bool Erased = PendingPhiRanges.erase(Phi);
5726 assert(Erased &&
"Failed to erase Phi properly?");
5731 return setRange(U, SignHint, std::move(ConservativeResult));
5734 return setRange(S, SignHint, std::move(ConservativeResult));
5743 const APInt &MaxBECount,
5744 unsigned BitWidth,
bool Signed) {
5747 if (Step == 0 || MaxBECount == 0)
5758 bool Descending = Signed && Step.
isNegative();
5782 APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
5783 : (StartUpper + std::move(Offset));
5788 if (StartRange.
contains(MovedBoundary))
5792 Descending ? std::move(MovedBoundary) : std::move(StartLower);
5794 Descending ? std::move(StartUpper) : std::move(MovedBoundary);
5798 if (NewLower == NewUpper)
5802 return ConstantRange(std::move(NewLower), std::move(NewUpper));
5807 const SCEV *MaxBECount,
5808 unsigned BitWidth) {
5809 assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
5810 getTypeSizeInBits(MaxBECount->
getType()) <= BitWidth &&
5813 MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->
getType());
5814 APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);
5824 MaxBECountValue, BitWidth,
true);
5826 StartSRange, MaxBECountValue,
5831 getUnsignedRangeMax(Step), getUnsignedRange(Start),
5832 MaxBECountValue, BitWidth,
false);
5835 return SR.intersectWith(UR);
5840 const SCEV *MaxBECount,
5841 unsigned BitWidth) {
5845 struct SelectPattern {
5846 Value *Condition =
nullptr;
5859 if (
auto *SA = dyn_cast<SCEVAddExpr>(S)) {
5862 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0)))
5865 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt();
5866 S = SA->getOperand(1);
5870 if (
auto *SCast = dyn_cast<SCEVCastExpr>(S)) {
5871 CastOp = SCast->getSCEVType();
5872 S = SCast->getOperand();
5878 const APInt *TrueVal, *FalseVal;
5882 Condition =
nullptr;
5886 TrueValue = *TrueVal;
5887 FalseValue = *FalseVal;
5896 TrueValue = TrueValue.
trunc(BitWidth);
5897 FalseValue = FalseValue.
trunc(BitWidth);
5900 TrueValue = TrueValue.
zext(BitWidth);
5901 FalseValue = FalseValue.
zext(BitWidth);
5904 TrueValue = TrueValue.
sext(BitWidth);
5905 FalseValue = FalseValue.
sext(BitWidth);
5914 bool isRecognized() {
return Condition !=
nullptr; }
5917 SelectPattern StartPattern(*
this, BitWidth, Start);
5918 if (!StartPattern.isRecognized())
5921 SelectPattern StepPattern(*
this, BitWidth, Step);
5922 if (!StepPattern.isRecognized())
5925 if (StartPattern.Condition != StepPattern.Condition) {
5946 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth);
5948 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth);
5969 bool ScalarEvolution::isSCEVExprNeverPoison(
const Instruction *I) {
5975 Loop *InnermostContainingLoop = LI.getLoopFor(I->
getParent());
5976 if (InnermostContainingLoop ==
nullptr ||
5999 for (
unsigned OpIndex = 0; OpIndex < I->
getNumOperands(); ++OpIndex) {
6005 if (
auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
6006 bool AllOtherOpsLoopInvariant =
true;
6007 for (
unsigned OtherOpIndex = 0; OtherOpIndex < I->
getNumOperands();
6009 if (OtherOpIndex != OpIndex) {
6012 AllOtherOpsLoopInvariant =
false;
6017 if (AllOtherOpsLoopInvariant &&
6025 bool ScalarEvolution::isAddRecNeverPoison(
const Instruction *I,
const Loop *L) {
6027 if (isSCEVExprNeverPoison(I))
6054 if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
6066 bool LatchControlDependentOnPoison =
false;
6067 while (!PoisonStack.
empty() && !LatchControlDependentOnPoison) {
6070 for (
auto *PoisonUser : Poison->
users()) {
6072 if (Pushed.
insert(cast<Instruction>(PoisonUser)).second)
6073 PoisonStack.
push_back(cast<Instruction>(PoisonUser));
6074 }
else if (
auto *BI = dyn_cast<BranchInst>(PoisonUser)) {
6075 assert(BI->isConditional() &&
"Only possibility!");
6076 if (BI->getParent() == LatchBB) {
6077 LatchControlDependentOnPoison =
true;
6084 return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
6087 ScalarEvolution::LoopProperties
6088 ScalarEvolution::getLoopProperties(
const Loop *L) {
6089 using LoopProperties = ScalarEvolution::LoopProperties;
6091 auto Itr = LoopPropertiesCache.find(L);
6092 if (Itr == LoopPropertiesCache.end()) {
6094 if (
auto *
SI = dyn_cast<StoreInst>(I))
6095 return !
SI->isSimple();
6100 LoopProperties LP = {
true,
6104 for (
auto &I : *BB) {
6106 LP.HasNoAbnormalExits =
false;
6107 if (HasSideEffects(&I))
6108 LP.HasNoSideEffects =
false;
6109 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
6113 auto InsertPair = LoopPropertiesCache.insert({L, LP});
6114 assert(InsertPair.second &&
"We just checked!");
6115 Itr = InsertPair.first;
6121 const SCEV *ScalarEvolution::createSCEV(
Value *V) {
6122 if (!isSCEVable(V->
getType()))
6123 return getUnknown(V);
6130 if (!DT.isReachableFromEntry(I->
getParent()))
6131 return getUnknown(V);
6132 }
else if (
ConstantInt *CI = dyn_cast<ConstantInt>(V))
6134 else if (isa<ConstantPointerNull>(V))
6136 else if (
GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
6137 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee());
6138 else if (!isa<ConstantExpr>(V))
6139 return getUnknown(V);
6143 switch (BO->Opcode) {
6154 if (
auto *OpSCEV = getExistingSCEV(BO->Op)) {
6166 const SCEV *RHS = getSCEV(BO->RHS);
6169 const SCEV *LHS = getSCEV(BO->LHS);
6170 if (BO->Opcode == Instruction::Sub)
6171 AddOps.
push_back(getMinusSCEV(LHS, RHS, Flags));
6173 AddOps.
push_back(getAddExpr(LHS, RHS, Flags));
6178 if (BO->Opcode == Instruction::Sub)
6179 AddOps.
push_back(getNegativeSCEV(getSCEV(BO->RHS)));
6185 NewBO->Opcode != Instruction::Sub)) {
6192 return getAddExpr(AddOps);
6195 case Instruction::Mul: {
6199 if (
auto *OpSCEV = getExistingSCEV(BO->Op)) {
6207 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags));
6214 if (!NewBO || NewBO->Opcode != Instruction::Mul) {
6221 return getMulExpr(MulOps);
6223 case Instruction::UDiv:
6224 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS));
6225 case Instruction::URem:
6226 return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS));
6227 case Instruction::Sub: {
6230 Flags = getNoWrapFlagsFromUB(BO->Op);
6231 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags);
6233 case Instruction::And:
6236 if (
ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
6238 return getSCEV(BO->RHS);
6239 if (CI->isMinusOne())
6240 return getSCEV(BO->LHS);
6241 const APInt &A = CI->getValue();
6252 0, &AC,
nullptr, &DT);
6254 APInt EffectiveMask =
6256 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.
Zero) & EffectiveMask)) {
6258 const SCEV *LHS = getSCEV(BO->LHS);
6259 const SCEV *ShiftedLHS =
nullptr;
6260 if (
auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) {
6261 if (
auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) {
6263 unsigned MulZeros = OpC->getAPInt().countTrailingZeros();
6264 unsigned GCD = std::min(MulZeros, TZ);
6268 MulOps.
append(LHSMul->op_begin() + 1, LHSMul->op_end());
6269 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags());
6270 ShiftedLHS = getUDivExpr(NewMul,
getConstant(DivAmt));
6274 ShiftedLHS = getUDivExpr(LHS, MulCount);
6277 getTruncateExpr(ShiftedLHS,
6279 BO->LHS->getType()),
6285 case Instruction::Or:
6292 if (
ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
6293 const SCEV *LHS = getSCEV(BO->LHS);
6294 const APInt &CIVal = CI->getValue();
6295 if (GetMinTrailingZeros(LHS) >=
6298 const SCEV *S = getAddExpr(LHS, getSCEV(CI));
6311 case Instruction::Xor:
6312 if (
ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
6314 if (CI->isMinusOne())
6315 return getNotSCEV(getSCEV(BO->LHS));
6321 if (
auto *LBO = dyn_cast<BinaryOperator>(BO->LHS))
6322 if (
ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1)))
6323 if (LBO->getOpcode() == Instruction::And &&
6324 LCI->getValue() == CI->getValue())
6326 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) {
6327 Type *UTy = BO->LHS->getType();
6328 const SCEV *Z0 =
Z->getOperand();
6330 unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
6335 if (CI->getValue().isMask(Z0TySize))
6336 return getZeroExtendExpr(getNotSCEV(Z0), UTy);
6341 APInt Trunc = CI->getValue().
trunc(Z0TySize);
6342 if (Trunc.
zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
6344 return getZeroExtendExpr(getAddExpr(Z0,
getConstant(Trunc)),
6350 case Instruction::Shl:
6352 if (
ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
6359 if (SA->getValue().uge(BitWidth))
6369 if (BO->Op && SA->getValue().ult(BitWidth - 1))
6370 Flags = getNoWrapFlagsFromUB(BO->Op);
6374 return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags);
6378 case Instruction::AShr: {
6384 Type *OuterTy = BO->LHS->getType();
6385 uint64_t BitWidth = getTypeSizeInBits(OuterTy);
6394 return getSCEV(BO->LHS);
6400 if (L && L->
getOpcode() == Instruction::Shl) {
6409 return getSignExtendExpr(
6410 getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);
6413 if (ShlAmtCI && ShlAmtCI->
getValue().
ult(BitWidth)) {
6415 if (ShlAmt > AShrAmt) {
6422 return getSignExtendExpr(
6423 getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
6434 case Instruction::Trunc:
6437 case Instruction::ZExt:
6440 case Instruction::SExt:
6449 if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
6451 auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
6452 auto *
V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
6458 case Instruction::BitCast:
6469 case Instruction::GetElementPtr:
6470 return createNodeForGEP(cast<GEPOperator>(U));
6472 case Instruction::PHI:
6473 return createNodeForPHI(cast<PHINode>(U));
6480 if (isa<Instruction>(U))
6481 return createNodeForSelectOrPHI(cast<Instruction>(U), U->
getOperand(0),
6486 case Instruction::Invoke:
6492 return getUnknown(V);
6515 return getSmallConstantTripCount(L, ExitingBB);
6523 assert(ExitingBlock &&
"Must pass a non-null exiting block!");
6525 "Exiting block must actually branch out of the loop!");
6532 const auto *MaxExitCount =
6539 return getSmallConstantTripMultiple(L, ExitingBB);
6560 assert(ExitingBlock &&
"Must pass a non-null exiting block!");
6562 "Exiting block must actually branch out of the loop!");
6563 const SCEV *ExitCount = getExitCount(L, ExitingBlock);
6564 if (ExitCount == getCouldNotCompute())
6568 const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType()));
6575 return 1U << std::min((
uint32_t)31, GetMinTrailingZeros(TCExpr));
6594 return getBackedgeTakenInfo(L).getExact(ExitingBlock,
this);
6600 return getPredicatedBackedgeTakenInfo(L).getExact(L,
this, &Preds);
6604 return getBackedgeTakenInfo(L).getExact(L,
this);
6610 return getBackedgeTakenInfo(L).getMax(
this);
6614 return getBackedgeTakenInfo(L).isMaxOrZero(
this);
6627 const ScalarEvolution::BackedgeTakenInfo &
6628 ScalarEvolution::getPredicatedBackedgeTakenInfo(
const Loop *L) {
6629 auto &BTI = getBackedgeTakenInfo(L);
6630 if (BTI.hasFullInfo())
6633 auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
6636 return Pair.first->second;
6638 BackedgeTakenInfo Result =
6639 computeBackedgeTakenCount(L,
true);
6641 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
6644 const ScalarEvolution::BackedgeTakenInfo &
6645 ScalarEvolution::getBackedgeTakenInfo(
const Loop *L) {
6651 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator,
bool> Pair =
6652 BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
6654 return Pair.first->second;
6659 BackedgeTakenInfo Result = computeBackedgeTakenCount(L);
6662 (void)NumTripCountsComputed;
6663 (void)NumTripCountsNotComputed;
6664 #if LLVM_ENABLE_STATS || !defined(NDEBUG) 6665 const SCEV *BEExact = Result.getExact(L,
this);
6666 if (BEExact != getCouldNotCompute()) {
6669 "Computed backedge-taken count isn't loop invariant for loop!");
6670 ++NumTripCountsComputed;
6672 else if (Result.getMax(
this) == getCouldNotCompute() &&
6675 ++NumTripCountsNotComputed;
6677 #endif // LLVM_ENABLE_STATS || !defined(NDEBUG) 6684 if (Result.hasAnyInfo()) {
6689 while (!Worklist.
empty()) {
6692 ValueExprMapType::iterator It =
6693 ValueExprMap.find_as(static_cast<Value *>(I));
6694 if (It != ValueExprMap.end()) {
6695 const SCEV *Old = It->second;
6703 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
6704 eraseValueFromMap(It->first);
6705 forgetMemoizedResults(Old);
6707 if (
PHINode *PN = dyn_cast<PHINode>(I))
6708 ConstantEvolutionLoopExitValue.erase(PN);
6729 for (
auto *U : I->
users())
6730 if (
auto *I = dyn_cast<Instruction>(U)) {
6731 auto *LoopForUser = LI.getLoopFor(I->
getParent());
6732 if (LoopForUser && L->
contains(LoopForUser) &&
6733 Discovered.
insert(I).second)
6744 return BackedgeTakenCounts.find(L)->second = std::move(Result);
6749 auto RemoveLoopFromBackedgeMap =
6751 auto BTCPos = Map.
find(L);
6752 if (BTCPos != Map.
end()) {
6753 BTCPos->second.clear();
6763 while (!LoopWorklist.
empty()) {
6766 RemoveLoopFromBackedgeMap(BackedgeTakenCounts, CurrL);
6767 RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts, CurrL);
6770 for (
auto I = PredicatedSCEVRewrites.begin();
6771 I != PredicatedSCEVRewrites.end();) {
6772 std::pair<const SCEV *, const Loop *> Entry = I->first;
6773 if (Entry.second == CurrL)
6774 PredicatedSCEVRewrites.erase(I++);
6779 auto LoopUsersItr = LoopUsers.find(CurrL);
6780 if (LoopUsersItr != LoopUsers.end()) {
6781 for (
auto *S : LoopUsersItr->second)
6782 forgetMemoizedResults(S);
6783 LoopUsers.erase(LoopUsersItr);
6789 while (!Worklist.
empty()) {
6791 if (!Visited.
insert(I).second)
6795 ValueExprMap.find_as(static_cast<Value *>(I));
6796 if (It != ValueExprMap.end()) {
6797 eraseValueFromMap(It->first);
6798 forgetMemoizedResults(It->second);
6799 if (
PHINode *PN = dyn_cast<PHINode>(I))
6800 ConstantEvolutionLoopExitValue.erase(PN);
6806 LoopPropertiesCache.erase(CurrL);
6809 LoopWorklist.
append(CurrL->begin(), CurrL->end());
6828 while (!Worklist.
empty()) {
6830 if (!Visited.
insert(I).second)
6834 ValueExprMap.find_as(static_cast<Value *>(I));
6835 if (It != ValueExprMap.end()) {
6836 eraseValueFromMap(It->first);
6837 forgetMemoizedResults(It->second);
6838 if (
PHINode *PN = dyn_cast<PHINode>(I))
6839 ConstantEvolutionLoopExitValue.erase(PN);
6856 if (!isComplete() || ExitNotTaken.empty())
6867 for (
auto &ENT : ExitNotTaken) {
6868 const SCEV *BECount = ENT.ExactNotTaken;
6871 "We should only have known counts for exiting blocks that dominate " 6876 if (Preds && !ENT.hasAlwaysTruePredicate())
6877 Preds->
add(ENT.Predicate.get());
6879 assert((Preds || ENT.hasAlwaysTruePredicate()) &&
6880 "Predicate should be always true!");
6888 ScalarEvolution::BackedgeTakenInfo::getExact(
BasicBlock *ExitingBlock,
6890 for (
auto &ENT : ExitNotTaken)
6891 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
6892 return ENT.ExactNotTaken;
6899 ScalarEvolution::BackedgeTakenInfo::getMax(
ScalarEvolution *SE)
const {
6900 auto PredicateNotAlwaysTrue = [](
const ExitNotTakenInfo &ENT) {
6901 return !ENT.hasAlwaysTruePredicate();
6904 if (
any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getMax())
6907 assert((isa<SCEVCouldNotCompute>(getMax()) || isa<SCEVConstant>(getMax())) &&
6908 "No point in having a non-constant max backedge taken count!");
6912 bool ScalarEvolution::BackedgeTakenInfo::isMaxOrZero(
ScalarEvolution *SE)
const {
6913 auto PredicateNotAlwaysTrue = [](
const ExitNotTakenInfo &ENT) {
6914 return !ENT.hasAlwaysTruePredicate();
6916 return MaxOrZero && !
any_of(ExitNotTaken, PredicateNotAlwaysTrue);
6919 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(
const SCEV *S,
6925 for (
auto &ENT : ExitNotTaken)
6933 ScalarEvolution::ExitLimit::ExitLimit(
const SCEV *
E)
6934 : ExactNotTaken(E), MaxNotTaken(E) {
6935 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
6936 isa<SCEVConstant>(MaxNotTaken)) &&
6937 "No point in having a non-constant max backedge taken count!");
6940 ScalarEvolution::ExitLimit::ExitLimit(
6941 const SCEV *E,
const SCEV *M,
bool MaxOrZero,
6943 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) {
6944 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) ||
6945 !isa<SCEVCouldNotCompute>(MaxNotTaken)) &&
6946 "Exact is not allowed to be less precise than Max");
6947 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
6948 isa<SCEVConstant>(MaxNotTaken)) &&
6949 "No point in having a non-constant max backedge taken count!");
6950 for (
auto *PredSet : PredSetList)
6951 for (
auto *
P : *PredSet)
6955 ScalarEvolution::ExitLimit::ExitLimit(
6956 const SCEV *E,
const SCEV *M,
bool MaxOrZero,
6958 : ExitLimit(E, M, MaxOrZero, {&PredSet}) {
6959 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
6960 isa<SCEVConstant>(MaxNotTaken)) &&
6961 "No point in having a non-constant max backedge taken count!");
6964 ScalarEvolution::ExitLimit::ExitLimit(
const SCEV *E,
const SCEV *M,
6966 : ExitLimit(E, M, MaxOrZero,
None) {
6967 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
6968 isa<SCEVConstant>(MaxNotTaken)) &&
6969 "No point in having a non-constant max backedge taken count!");
6974 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
6977 bool Complete,
const SCEV *MaxCount,
bool MaxOrZero)
6978 : MaxAndComplete(MaxCount, Complete), MaxOrZero(MaxOrZero) {
6979 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
6981 ExitNotTaken.reserve(ExitCounts.size());
6983 ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken),
6984 [&](
const EdgeExitInfo &EEI) {
6986 const ExitLimit &EL = EEI.second;
6987 if (EL.Predicates.empty())
6988 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken,
nullptr);
6991 for (
auto *Pred : EL.Predicates)
6992 Predicate->add(Pred);
6994 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, std::move(Predicate));
6996 assert((isa<SCEVCouldNotCompute>(MaxCount) || isa<SCEVConstant>(MaxCount)) &&
6997 "No point in having a non-constant max backedge taken count!");
7002 ExitNotTaken.clear();
7006 ScalarEvolution::BackedgeTakenInfo
7007 ScalarEvolution::computeBackedgeTakenCount(
const Loop *L,
7008 bool AllowPredicates) {
7012 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
7015 bool CouldComputeBECount =
true;
7017 const SCEV *MustExitMaxBECount =
nullptr;
7018 const SCEV *MayExitMaxBECount =
nullptr;
7019 bool MustExitMaxOrZero =
false;
7024 for (
unsigned i = 0, e = ExitingBlocks.
size(); i != e; ++i) {
7026 ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates);
7028 assert((AllowPredicates || EL.Predicates.empty()) &&
7029 "Predicated exit limit when predicates are not allowed!");
7033 if (EL.ExactNotTaken == getCouldNotCompute())
7036 CouldComputeBECount =
false;
7050 if (EL.MaxNotTaken != getCouldNotCompute() && Latch &&
7051 DT.dominates(ExitBB, Latch)) {
7052 if (!MustExitMaxBECount) {
7053 MustExitMaxBECount = EL.MaxNotTaken;
7054 MustExitMaxOrZero = EL.MaxOrZero;
7056 MustExitMaxBECount =
7057 getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken);
7059 }
else if (MayExitMaxBECount != getCouldNotCompute()) {
7060 if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute())
7061 MayExitMaxBECount = EL.MaxNotTaken;
7064 getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken);
7068 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
7069 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
7072 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.
size() == 1);
7073 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
7074 MaxBECount, MaxOrZero);
7077 ScalarEvolution::ExitLimit
7078 ScalarEvolution::computeExitLimit(
const Loop *L,
BasicBlock *ExitingBlock,
7079 bool AllowPredicates) {
7080 assert(L->
contains(ExitingBlock) &&
"Exit count for non-loop block?");
7084 if (!Latch || !DT.dominates(ExitingBlock, Latch))
7085 return getCouldNotCompute();
7089 if (
BranchInst *BI = dyn_cast<BranchInst>(Term)) {
7090 assert(BI->isConditional() &&
"If unconditional, it can't be in loop!");
7091 bool ExitIfTrue = !L->
contains(BI->getSuccessor(0));
7093 "It should have one successor in loop and one exit block!");
7095 return computeExitLimitFromCond(
7096 L, BI->getCondition(), ExitIfTrue,
7097 IsOnlyExit, AllowPredicates);
7106 return getCouldNotCompute();
7109 assert(Exit &&
"Exiting block must have at least one exit");
7110 return computeExitLimitFromSingleExitSwitch(L,
SI, Exit,
7114 return getCouldNotCompute();
7117 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond(
7118 const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
7119 bool ControlsExit,
bool AllowPredicates) {
7120 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
7121 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
7122 ControlsExit, AllowPredicates);
7127 bool ExitIfTrue,
bool ControlsExit,
7128 bool AllowPredicates) {
7130 (void)this->ExitIfTrue;
7131 (void)this->AllowPredicates;
7133 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
7134 this->AllowPredicates == AllowPredicates &&
7135 "Variance in assumed invariant key components!");
7136 auto Itr = TripCountMap.find({ExitCond, ControlsExit});
7137 if (Itr == TripCountMap.end())
7142 void ScalarEvolution::ExitLimitCache::insert(
const Loop *L,
Value *ExitCond,
7145 bool AllowPredicates,
7146 const ExitLimit &EL) {
7147 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
7148 this->AllowPredicates == AllowPredicates &&
7149 "Variance in assumed invariant key components!");
7151 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL});
7152 assert(InsertResult.second &&
"Expected successful insertion!");
7157 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached(
7158 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
7159 bool ControlsExit,
bool AllowPredicates) {
7162 Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates))
7165 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue,
7166 ControlsExit, AllowPredicates);
7167 Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL);
7171 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
7172 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
7173 bool ControlsExit,
bool AllowPredicates) {
7176 if (BO->getOpcode() == Instruction::And) {
7178 bool EitherMayExit = !ExitIfTrue;
7179 ExitLimit EL0 = computeExitLimitFromCondCached(
7180 Cache, L, BO->getOperand(0), ExitIfTrue,
7181 ControlsExit && !EitherMayExit, AllowPredicates);
7182 ExitLimit EL1 = computeExitLimitFromCondCached(
7183 Cache, L, BO->getOperand(1), ExitIfTrue,
7184 ControlsExit && !EitherMayExit, AllowPredicates);
7185 const SCEV *BECount = getCouldNotCompute();
7186 const SCEV *MaxBECount = getCouldNotCompute();
7187 if (EitherMayExit) {
7190 if (EL0.ExactNotTaken == getCouldNotCompute() ||
7191 EL1.ExactNotTaken == getCouldNotCompute())
7192 BECount = getCouldNotCompute();
7195 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken);
7196 if (EL0.MaxNotTaken == getCouldNotCompute())
7197 MaxBECount = EL1.MaxNotTaken;
7198 else if (EL1.MaxNotTaken == getCouldNotCompute())
7199 MaxBECount = EL0.MaxNotTaken;
7202 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken);
7206 if (EL0.MaxNotTaken == EL1.MaxNotTaken)
7207 MaxBECount = EL0.MaxNotTaken;
7208 if (EL0.ExactNotTaken == EL1.ExactNotTaken)
7209 BECount = EL0.ExactNotTaken;
7217 if (isa<SCEVCouldNotCompute>(MaxBECount) &&
7218 !isa<SCEVCouldNotCompute>(BECount))
7219 MaxBECount =
getConstant(getUnsignedRangeMax(BECount));
7221 return ExitLimit(BECount, MaxBECount,
false,
7222 {&EL0.Predicates, &EL1.Predicates});
7224 if (BO->getOpcode() == Instruction::Or) {
7226 bool EitherMayExit = ExitIfTrue;
7227 ExitLimit EL0 = computeExitLimitFromCondCached(
7228 Cache, L, BO->getOperand(0), ExitIfTrue,
7229 ControlsExit && !EitherMayExit, AllowPredicates);
7230 ExitLimit EL1 = computeExitLimitFromCondCached(
7231 Cache, L, BO->getOperand(1), ExitIfTrue,
7232 ControlsExit && !EitherMayExit, AllowPredicates);
7233 const SCEV *BECount = getCouldNotCompute();
7234 const SCEV *MaxBECount = getCouldNotCompute();
7235 if (EitherMayExit) {
7238 if (EL0.ExactNotTaken == getCouldNotCompute() ||
7239 EL1.ExactNotTaken == getCouldNotCompute())
7240 BECount = getCouldNotCompute();
7243 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken);
7244 if (EL0.MaxNotTaken == getCouldNotCompute())
7245 MaxBECount = EL1.MaxNotTaken;
7246 else if (EL1.MaxNotTaken == getCouldNotCompute())
7247 MaxBECount = EL0.MaxNotTaken;
7250 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken);
7254 if (EL0.MaxNotTaken == EL1.MaxNotTaken)
7255 MaxBECount = EL0.MaxNotTaken;
7256 if (EL0.ExactNotTaken == EL1.ExactNotTaken)
7257 BECount = EL0.ExactNotTaken;
7260 return ExitLimit(BECount, MaxBECount,
false,
7261 {&EL0.Predicates, &EL1.Predicates});
7267 if (
ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) {
7269 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit);
7270 if (EL.hasFullInfo() || !AllowPredicates)
7274 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit,
7282 if (
ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
7283 if (ExitIfTrue == !CI->getZExtValue())
7285 return getCouldNotCompute();
7288 return getZero(CI->getType());
7292 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
7295 ScalarEvolution::ExitLimit
7296 ScalarEvolution::computeExitLimitFromICmp(
const Loop *L,
7300 bool AllowPredicates) {
7313 computeLoadConstantCompareExitLimit(LI, RHS, L, Pred);
7314 if (ItCnt.hasAnyInfo())
7322 LHS = getSCEVAtScope(LHS, L);
7323 RHS = getSCEVAtScope(RHS, L);
7334 (void)SimplifyICmpOperands(Pred, LHS, RHS);
7338 if (
const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
7339 if (
const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
7340 if (AddRec->getLoop() == L) {
7345 const SCEV *
Ret = AddRec->getNumIterationsInRange(CompRange, *
this);
7346 if (!isa<SCEVCouldNotCompute>(Ret))
return Ret;
7352 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit,
7354 if (EL.hasAnyInfo())
return EL;
7359 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L);
7360 if (EL.hasAnyInfo())
return EL;
7366 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit,
7368 if (EL.hasAnyInfo())
return EL;
7375 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit,
7377 if (EL.hasAnyInfo())
return EL;
7384 auto *ExhaustiveCount =
7385 computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
7387 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount))
7388 return ExhaustiveCount;
7390 return computeShiftCompareExitLimit(ExitCond->
getOperand(0),
7394 ScalarEvolution::ExitLimit
7395 ScalarEvolution::computeExitLimitFromSingleExitSwitch(
const Loop *L,
7398 bool ControlsExit) {
7403 return getCouldNotCompute();
7406 "Default case must not exit the loop!");
7411 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit);
7412 if (EL.hasAnyInfo())
7415 return getCouldNotCompute();
7423 assert(isa<SCEVConstant>(Val) &&
7424 "Evaluation of SCEV at constant didn't fold correctly?");
7425 return cast<SCEVConstant>(Val)->getValue();
7430 ScalarEvolution::ExitLimit
7431 ScalarEvolution::computeLoadConstantCompareExitLimit(
7436 if (LI->
isVolatile())
return getCouldNotCompute();
7441 if (!GEP)
return getCouldNotCompute();
7448 !cast<Constant>(GEP->
getOperand(1))->isNullValue())
7449 return getCouldNotCompute();
7452 Value *VarIdx =
nullptr;
7453 std::vector<Constant*> Indexes;
7454 unsigned VarIdxNum = 0;
7457 Indexes.push_back(CI);
7458 }
else if (!isa<ConstantInt>(GEP->
getOperand(i))) {
7459 if (VarIdx)
return getCouldNotCompute();
7462 Indexes.push_back(
nullptr);
7467 return getCouldNotCompute();
7471 const SCEV *Idx = getSCEV(VarIdx);
7472 Idx = getSCEVAtScope(Idx, L);
7478 !isa<SCEVConstant>(IdxExpr->
getOperand(0)) ||
7480 return getCouldNotCompute();
7483 for (
unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
7485 cast<IntegerType>(IdxExpr->
getType()), IterationNum);
7489 Indexes[VarIdxNum] = Val;
7497 if (!isa<ConstantInt>(Result))
break;
7498 if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
7499 ++NumArrayLenItCounts;
7503 return getCouldNotCompute();
7506 ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
7510 return getCouldNotCompute();
7514 return getCouldNotCompute();
7518 return getCouldNotCompute();
7522 auto MatchPositiveShift =
7529 OutOpCode = Instruction::LShr;
7531 OutOpCode = Instruction::AShr;
7533 OutOpCode = Instruction::Shl;
7548 auto MatchShiftRecurrence =
7565 if (MatchPositiveShift(LHS, V, OpC)) {
7566 PostShiftOpCode = OpC;
7572 if (!PNOut || PNOut->getParent() != L->
getHeader())
7575 Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
7581 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
7588 (!PostShiftOpCode.
hasValue() || *PostShiftOpCode == OpCodeOut);
7593 if (!MatchShiftRecurrence(LHS, PN, OpCode))
7594 return getCouldNotCompute();
7610 case Instruction::AShr: {
7616 auto *
Ty = cast<IntegerType>(RHS->
getType());
7622 return getCouldNotCompute();
7626 case Instruction::LShr:
7627 case Instruction::Shl:
7636 assert(Result->getType()->isIntegerTy(1) &&
7637 "Otherwise cannot be an operand to a branch instruction");
7639 if (Result->isZeroValue()) {
7640 unsigned BitWidth = getTypeSizeInBits(RHS->
getType());
7641 const SCEV *UpperBound =
7643 return ExitLimit(getCouldNotCompute(), UpperBound,
false);
7646 return getCouldNotCompute();
7652 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
7653 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
7657 if (
const CallInst *CI = dyn_cast<CallInst>(I))
7658 if (
const Function *
F = CI->getCalledFunction())
7669 if (isa<PHINode>(I)) {
7693 if (isa<Constant>(
Op))
continue;
7703 P = PHIMap.
lookup(OpInst);
7712 if (PHI && PHI != P)
7729 if (
PHINode *PN = dyn_cast<PHINode>(I))
7746 if (
Constant *
C = dyn_cast<Constant>(V))
return C;
7748 if (!I)
return nullptr;
7759 if (isa<PHINode>(I))
return nullptr;
7767 if (!Operands[i])
return nullptr;
7772 if (!C)
return nullptr;
7776 if (
CmpInst *CI = dyn_cast<CmpInst>(I))
7778 Operands[1], DL, TLI);
7779 if (
LoadInst *LI = dyn_cast<LoadInst>(I)) {
7800 if (IncomingVal != CurrentVal) {
7803 IncomingVal = CurrentVal;
7815 ScalarEvolution::getConstantEvolutionLoopExitValue(
PHINode *PN,
7818 auto I = ConstantEvolutionLoopExitValue.find(PN);
7819 if (I != ConstantEvolutionLoopExitValue.end())
7823 return ConstantEvolutionLoopExitValue[PN] =
nullptr;
7825 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
7829 assert(PN->
getParent() == Header &&
"Can't evaluate PHI not in loop header!");
7837 CurrentIterVals[&PHI] = StartCST;
7839 if (!CurrentIterVals.
count(PN))
7840 return RetVal =
nullptr;
7846 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");
7849 unsigned IterationNum = 0;
7851 for (; ; ++IterationNum) {
7852 if (IterationNum == NumIterations)
7853 return RetVal = CurrentIterVals[PN];
7862 NextIterVals[PN] = NextPHI;
7864 bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
7870 for (
const auto &I : CurrentIterVals) {
7872 if (!PHI || PHI == PN || PHI->
getParent() != Header)
continue;
7877 for (
const auto &I : PHIsToCompute) {
7879 Constant *&NextPHI = NextIterVals[PHI];
7884 if (NextPHI != I.second)
7885 StoppedEvolving =
false;
7890 if (StoppedEvolving)
7891 return RetVal = CurrentIterVals[PN];
7893 CurrentIterVals.swap(NextIterVals);
7897 const SCEV *ScalarEvolution::computeExitCountExhaustively(
const Loop *L,
7901 if (!PN)
return getCouldNotCompute();
7909 assert(PN->
getParent() == Header &&
"Can't evaluate PHI not in loop header!");
7912 assert(Latch &&
"Should follow from NumIncomingValues == 2!");
7916 CurrentIterVals[&PHI] = StartCST;
7918 if (!CurrentIterVals.
count(PN))
7919 return getCouldNotCompute();
7926 for (
unsigned IterationNum = 0; IterationNum !=
MaxIterations;++IterationNum){
7927 auto *CondVal = dyn_cast_or_null<ConstantInt>(
7931 if (!CondVal)
return getCouldNotCompute();
7933 if (CondVal->getValue() == uint64_t(ExitWhen)) {
7934 ++NumBruteForceTripCountsComputed;
7945 for (
const auto &I : CurrentIterVals) {
7947 if (!PHI || PHI->
getParent() != Header)
continue;
7950 for (
PHINode *PHI : PHIsToCompute) {
7951 Constant *&NextPHI = NextIterVals[PHI];
7952 if (NextPHI)
continue;
7954 Value *BEValue = PHI->getIncomingValueForBlock(Latch);
7957 CurrentIterVals.swap(NextIterVals);
7961 return getCouldNotCompute();
7968 for (
auto &
LS : Values)
7970 return LS.second ?
LS.second : V;
7972 Values.emplace_back(L,
nullptr);
7975 const SCEV *
C = computeSCEVAtScope(V, L);
7976 for (
auto &
LS :
reverse(ValuesAtScopes[V]))
7977 if (
LS.first == L) {
7989 switch (static_cast<SCEVTypes>(V->
getSCEVType())) {
7994 return cast<SCEVConstant>(V)->getValue();
8019 unsigned AS = PTy->getAddressSpace();
8025 if (!C2)
return nullptr;
8043 if (PTy->getElementType()->isStructTy())
8083 const SCEV *ScalarEvolution::computeSCEVAtScope(
const SCEV *V,
const Loop *L) {
8084 if (isa<SCEVConstant>(V))
return V;
8088 if (
const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
8089 if (
Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
8092 if (
PHINode *PN = dyn_cast<PHINode>(I))
8098 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
8100 dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
8104 if (BTCC->getValue()->isZero()) {
8105 Value *InitValue =
nullptr;
8106 bool MultipleInitValues =
false;
8112 MultipleInitValues =
true;
8116 if (!MultipleInitValues && InitValue)
8117 return getSCEV(InitValue);
8124 getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), LI);
8125 if (RV)
return getSCEV(RV);
8135 bool MadeImprovement =
false;
8148 const SCEV *OrigV = getSCEV(
Op);
8149 const SCEV *OpV = getSCEVAtScope(OrigV, L);
8150 MadeImprovement |= OrigV != OpV;
8163 if (MadeImprovement) {
8166 if (
const CmpInst *CI = dyn_cast<CmpInst>(I))
8168 Operands[1], DL, &TLI);
8169 else if (
const LoadInst *LI = dyn_cast<LoadInst>(I)) {
8170 if (!LI->isVolatile())
8187 for (
unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
8188 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
8189 if (OpAtScope != Comm->getOperand(i)) {
8193 Comm->op_begin()+i);
8196 for (++i; i != e; ++i) {
8197 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
8198 NewOps.push_back(OpAtScope);
8200 if (isa<SCEVAddExpr>(Comm))
8201 return getAddExpr(NewOps);
8202 if (isa<SCEVMulExpr>(Comm))
8203 return getMulExpr(NewOps);
8204 if (isa<SCEVSMaxExpr>(Comm))
8205 return getSMaxExpr(NewOps);
8206 if (isa<SCEVUMaxExpr>(Comm))
8207 return getUMaxExpr(NewOps);
8215 if (
const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
8216 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
8217 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
8218 if (LHS == Div->getLHS() && RHS == Div->getRHS())
8220 return getUDivExpr(LHS, RHS);
8225 if (
const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
8229 for (
unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
8230 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
8231 if (OpAtScope == AddRec->getOperand(i))
8237 AddRec->op_begin()+i);
8239 for (++i; i != e; ++i)
8240 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));
8242 const SCEV *FoldedRec =
8243 getAddRecExpr(NewOps, AddRec->getLoop(),
8256 if (!AddRec->getLoop()->contains(L)) {
8259 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
8260 if (BackedgeTakenCount == getCouldNotCompute())
return AddRec;
8263 return AddRec->evaluateAtIteration(BackedgeTakenCount, *
this);
8270 const SCEV *
Op = getSCEVAtScope(Cast->getOperand(), L);
8271 if (Op == Cast->getOperand())
8273 return getZeroExtendExpr(Op, Cast->
getType());
8277 const SCEV *
Op = getSCEVAtScope(Cast->getOperand(), L);
8278 if (Op == Cast->getOperand())
8280 return getSignExtendExpr(Op, Cast->
getType());
8284 const SCEV *
Op = getSCEVAtScope(Cast->getOperand(), L);
8285 if (Op == Cast->getOperand())
8287 return getTruncateExpr(Op, Cast->
getType());
8294 return getSCEVAtScope(getSCEV(V), L);
8297 const SCEV *ScalarEvolution::stripInjectiveFunctions(
const SCEV *S)
const {
8299 return stripInjectiveFunctions(ZExt->getOperand());
8301 return stripInjectiveFunctions(SExt->getOperand());
8317 assert(A != 0 &&
"A must be non-zero.");
8367 LLVM_DEBUG(
dbgs() << __func__ <<
": analyzing quadratic addrec: " 8368 << *AddRec <<
'\n');
8371 if (!LC || !MC || !NC) {
8372 LLVM_DEBUG(
dbgs() << __func__ <<
": coefficients are not constant\n");
8376 APInt L = LC->getAPInt();
8377 APInt M = MC->getAPInt();
8378 APInt N = NC->getAPInt();
8381 unsigned BitWidth = LC->getAPInt().getBitWidth();
8382 unsigned NewWidth = BitWidth + 1;
8384 << BitWidth <<
'\n');
8387 N = N.
sext(NewWidth);
8388 M = M.
sext(NewWidth);
8389 L = L.
sext(NewWidth);
8405 LLVM_DEBUG(
dbgs() << __func__ <<
": equation " << A <<
"x^2 + " << B
8406 <<
"x + " << C <<
", coeff bw: " << NewWidth
8407 <<
", multiplied by " << T <<
'\n');
8408 return std::make_tuple(A, B, C, T, BitWidth);
8420 return XW.
slt(YW) ? *
X : *
Y;
8442 if (BitWidth > 1 && BitWidth < W && X->
isIntN(BitWidth))
8443 return X->
trunc(BitWidth);
8469 std::tie(A, B, C, M, BitWidth) = *
T;
8470 LLVM_DEBUG(
dbgs() << __func__ <<
": solving for unsigned overflow\n");
8497 "Starting value of addrec should be 0");
8498 LLVM_DEBUG(
dbgs() << __func__ <<
": solving boundary crossing for range " 8499 << Range <<
", addrec " << *AddRec <<
'\n');
8503 "Addrec's initial value should be in range");
8522 LLVM_DEBUG(
dbgs() <<
"SolveQuadraticAddRecRange: checking boundary " 8523 << Bound <<
" (before multiplying by " << M <<
")\n");
8529 "signed overflow\n");
8533 "unsigned overflow\n");
8537 auto LeavesRange = [&] (
const APInt &
X) {
8540 if (Range.
contains(V0->getValue()))
8545 if (Range.
contains(V1->getValue()))
8554 return {
None,
false };
8559 if (LeavesRange(*Min))
8560 return { Min,
true };
8562 if (LeavesRange(*Max))
8563 return { Max,
true };
8566 return {
None,
true };
8569 std::tie(A, B, C, M, BitWidth) = *
T;
8573 auto SL = SolveForBoundary(Lower);
8574 auto SU = SolveForBoundary(Upper);
8577 if (!SL.second || !SU.second)
8621 ScalarEvolution::ExitLimit
8622 ScalarEvolution::howFarToZero(
const SCEV *V,
const Loop *L,
bool ControlsExit,
8623 bool AllowPredicates) {
8634 if (
C->getValue()->isZero())
return C;
8635 return getCouldNotCompute();
8641 if (!AddRec && AllowPredicates)
8645 AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);
8647 if (!AddRec || AddRec->
getLoop() != L)
8648 return getCouldNotCompute();
8657 const auto *R = cast<SCEVConstant>(
getConstant(S.getValue()));
8658 return ExitLimit(R, R,
false, Predicates);
8660 return getCouldNotCompute();
8665 return getCouldNotCompute();
8689 if (!StepC || StepC->getValue()->isZero())
8690 return getCouldNotCompute();
8698 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);
8703 if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) {
8704 APInt MaxBECount = getUnsignedRangeMax(Distance);
8715 const SCEV *DistancePlusOne = getAddExpr(Distance, One);
8722 return ExitLimit(Distance,
getConstant(MaxBECount),
false, Predicates);
8731 loopHasNoAbnormalExits(AddRec->
getLoop())) {
8733 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
8735 Exact == getCouldNotCompute()
8738 return ExitLimit(Exact, Max,
false, Predicates);
8743 getNegativeSCEV(Start), *
this);
8744 const SCEV *M = E == getCouldNotCompute()
8747 return ExitLimit(E, M,
false, Predicates);
8750 ScalarEvolution::ExitLimit
8751 ScalarEvolution::howFarToNonZero(
const SCEV *V,
const Loop *L) {
8759 if (!
C->getValue()->isZero())
8761 return getCouldNotCompute();
8766 return getCouldNotCompute();
8769 std::pair<BasicBlock *, BasicBlock *>
8770 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(
BasicBlock *BB) {
8780 if (
Loop *L = LI.getLoopFor(BB))
8783 return {
nullptr,
nullptr};
8792 if (A == B)
return true;
8798 return A->
isIdenticalTo(B) && (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A));
8803 if (
const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
8804 if (
const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
8805 if (
const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
8806 if (
const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
8807 if (ComputesEqualValues(AI, BI))
8815 const SCEV *&LHS,
const SCEV *&RHS,
8817 bool Changed =
false;
8820 auto TrivialCase = [&](
bool TriviallyTrue) {
8830 if (
const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
8832 if (
const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
8835 RHSC->getValue())->isNullValue())
8836 return TrivialCase(
false);
8838 return TrivialCase(
true);
8860 if (
const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
8861 const APInt &
RA = RC->getAPInt();
8863 bool SimplifiedByConstantRange =
false;
8868 return TrivialCase(
true);
8870 return TrivialCase(
false);
8879 Changed = SimplifiedByConstantRange =
true;
8883 if (!SimplifiedByConstantRange) {
8891 if (
const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
8893 dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
8894 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
8895 ME->getOperand(0)->isAllOnesValue()) {
8896 RHS = AE->getOperand(1);
8897 LHS = ME->getOperand(1);
8939 return TrivialCase(
true);
8941 return TrivialCase(
false);
8948 if (!getSignedRangeMax(RHS).isMaxSignedValue()) {
8953 }
else if (!getSignedRangeMin(LHS).isMinSignedValue()) {
8961 if (!getSignedRangeMin(RHS).isMinSignedValue()) {
8966 }
else if (!getSignedRangeMax(LHS).isMaxSignedValue()) {
8974 if (!getUnsignedRangeMax(RHS).isMaxValue()) {
8979 }
else if (!getUnsignedRangeMin(LHS).isMinValue()) {
8986 if (!getUnsignedRangeMin(RHS).isMinValue()) {
8990 }
else if (!getUnsignedRangeMax(LHS).isMaxValue()) {
9006 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1);
9012 return getSignedRangeMax(S).isNegative();
9016 return getSignedRangeMin(S).isStrictlyPositive();
9020 return !getSignedRangeMin(S).isNegative();
9024 return !getSignedRangeMax(S).isStrictlyPositive();
9031 std::pair<const SCEV *, const SCEV *>
9034 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *
this);
9035 if (Start == getCouldNotCompute())
9036 return { Start, Start };
9038 const SCEV *
PostInc = SCEVPostIncRewriter::rewrite(S, L, *
this);
9039 assert(PostInc != getCouldNotCompute() &&
"Unexpected could not compute");
9040 return { Start, PostInc };
9044 const SCEV *LHS,
const SCEV *RHS) {
9047 getUsedLoops(LHS, LoopsUsed);
9048 getUsedLoops(RHS, LoopsUsed);
9050 if (LoopsUsed.
empty())
9055 for (
auto *L1 : LoopsUsed)
9056 for (
auto *L2 : LoopsUsed)
9057 assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
9058 DT.dominates(L2->getHeader(), L1->getHeader())) &&
9059 "Domination relationship is not a linear order");
9063 *std::max_element(LoopsUsed.begin(), LoopsUsed.end(),
9064 [&](
const Loop *L1,
const Loop *L2) {
9065 return DT.properlyDominates(L1->
getHeader(), L2->getHeader());
9069 auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS);
9071 if (SplitLHS.first == getCouldNotCompute())
9073 assert (SplitLHS.second != getCouldNotCompute() &&
"Unexpected CNC");
9075 auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS);
9077 if (SplitRHS.first == getCouldNotCompute())
9079 assert (SplitRHS.second != getCouldNotCompute() &&
"Unexpected CNC");
9083 if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) ||
9084 !isAvailableAtLoopEntry(SplitRHS.first, MDL))
9087 return isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first) &&
9088 isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second,
9093 const SCEV *LHS,
const SCEV *RHS) {
9095 (void)SimplifyICmpOperands(Pred, LHS, RHS);
9097 if (isKnownViaInduction(Pred, LHS, RHS))
9100 if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
9104 return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
9111 return isLoopEntryGuardedByCond(L, Pred, LHS->
getStart(), RHS) &&
9112 isLoopBackedgeGuardedByCond(L, Pred, LHS->
getPostIncExpr(*
this), RHS);
9118 bool Result = isMonotonicPredicateImpl(LHS, Pred, Increasing);
9123 bool IncreasingSwapped;
9124 bool ResultSwapped = isMonotonicPredicateImpl(
9127 assert(Result == ResultSwapped &&
"should be able to analyze both!");
9129 assert(Increasing == !IncreasingSwapped &&
9130 "monotonicity should flip as we flip the predicate");
9136 bool ScalarEvolution::isMonotonicPredicateImpl(
const SCEVAddRecExpr *LHS,
9178 if (isKnownNonPositive(Step)) {
9194 const SCEV *&InvariantRHS) {
9206 if (!ArLHS || ArLHS->
getLoop() != L)
9210 if (!isMonotonicPredicate(ArLHS, Pred, Increasing))
9233 if (!isLoopBackedgeGuardedByCond(L,
P, LHS, RHS))
9236 InvariantPred = Pred;
9242 bool ScalarEvolution::isKnownPredicateViaConstantRanges(
9262 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
9263 CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) ||
9267 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS));
9269 return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS));
9277 auto MatchBinaryAddToConst =
9280 const SCEV *NonConstOp, *ConstOp;
9283 if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) ||
9284 !isa<SCEVConstant>(ConstOp) || NonConstOp != X)
9287 OutY = cast<SCEVConstant>(ConstOp)->getAPInt();
9288 return (FlagsPresent & ExpectedFlags) == ExpectedFlags;
9351 bool ScalarEvolution::isImpliedViaGuard(
BasicBlock *BB,
9353 const SCEV *LHS,
const SCEV *RHS) {
9362 return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
9364 isImpliedCond(Pred, LHS, RHS, Condition,
false);
9374 const SCEV *LHS,
const SCEV *RHS) {
9377 if (!L)
return true;
9381 "This cannot be done on broken IR!");
9384 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
9393 if (LoopContinuePredicate && LoopContinuePredicate->
isConditional() &&
9394 isImpliedCond(Pred, LHS, RHS,
9401 if (WalkingBEDominatingConds)
9407 const auto &BETakenInfo = getBackedgeTakenInfo(L);
9408 const SCEV *LatchBECount = BETakenInfo.getExact(Latch,
this);
9409 if (LatchBECount != getCouldNotCompute()) {
9415 const SCEV *LoopCounter =
9416 getAddRecExpr(getZero(Ty), getOne(Ty), L,
NoWrapFlags);
9423 for (
auto &AssumeVH : AC.assumptions()) {
9426 auto *CI = cast<CallInst>(AssumeVH);
9430 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0),
false))
9437 if (!DT.isReachableFromEntry(L->
getHeader()))
9440 if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
9444 DTN != HeaderDTN; DTN = DTN->
getIDom()) {
9445 assert(DTN &&
"should reach the loop header before reaching the root!");
9448 if (isImpliedViaGuard(BB, Pred, LHS, RHS))
9456 if (!ContinuePredicate || !ContinuePredicate->
isConditional())
9470 assert(DT.dominates(DominatingEdge, Latch) &&
"should be!");
9472 if (isImpliedCond(Pred, LHS, RHS, Condition,
9484 const SCEV *LHS,
const SCEV *RHS) {
9487 if (!L)
return false;
9491 "This cannot be done on broken IR!");
9494 assert(isAvailableAtLoopEntry(LHS, L) &&
9495 "LHS is not available at Loop Entry");
9496 assert(isAvailableAtLoopEntry(RHS, L) &&
9497 "RHS is not available at Loop Entry");
9499 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
9508 const bool ProvingStrictComparison = (Pred != NonStrictPredicate);
9509 bool ProvedNonStrictComparison =
false;
9510 bool ProvedNonEquality =
false;
9512 if (ProvingStrictComparison) {
9513 ProvedNonStrictComparison =
9514 isKnownViaNonRecursiveReasoning(NonStrictPredicate, LHS, RHS);
9517 if (ProvedNonStrictComparison && ProvedNonEquality)
9522 auto ProveViaGuard = [&](
BasicBlock *Block) {
9523 if (isImpliedViaGuard(Block, Pred, LHS, RHS))
9525 if (ProvingStrictComparison) {
9526 if (!ProvedNonStrictComparison)
9527 ProvedNonStrictComparison =
9528 isImpliedViaGuard(Block, NonStrictPredicate, LHS, RHS);
9529 if (!ProvedNonEquality)
9532 if (ProvedNonStrictComparison && ProvedNonEquality)
9539 auto ProveViaCond = [&](
Value *Condition,
bool Inverse) {
9540 if (isImpliedCond(Pred, LHS, RHS, Condition,
Inverse))
9542 if (ProvingStrictComparison) {
9543 if (!ProvedNonStrictComparison)
9544 ProvedNonStrictComparison =
9545 isImpliedCond(NonStrictPredicate, LHS, RHS, Condition,
Inverse);
9546 if (!ProvedNonEquality)
9549 if (ProvedNonStrictComparison && ProvedNonEquality)
9558 for (std::pair<BasicBlock *, BasicBlock *>
9561 Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
9563 if (ProveViaGuard(Pair.first))
9568 if (!LoopEntryPredicate ||
9578 for (
auto &AssumeVH : AC.assumptions()) {
9581 auto *CI = cast<CallInst>(AssumeVH);
9585 if (ProveViaCond(CI->getArgOperand(0),
false))
9594 Value *FoundCondValue,
9596 if (!PendingLoopPredicates.insert(FoundCondValue).second)
9600 make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });
9603 if (
BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
9604 if (BO->getOpcode() == Instruction::And) {
9606 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
9607 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
9608 }
else if (BO->getOpcode() == Instruction::Or) {
9610 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
9611 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
9616 if (!ICI)
return false;
9624 FoundPred = ICI->getPredicate();
9626 const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
9627 const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
9629 return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS);
9635 const SCEV *FoundLHS,
9636 const SCEV *FoundRHS) {
9638 if (getTypeSizeInBits(LHS->
getType()) <
9639 getTypeSizeInBits(FoundLHS->
getType())) {
9641 LHS = getSignExtendExpr(LHS, FoundLHS->
getType());
9642 RHS = getSignExtendExpr(RHS, FoundLHS->
getType());
9644 LHS = getZeroExtendExpr(LHS, FoundLHS->
getType());
9645 RHS = getZeroExtendExpr(RHS, FoundLHS->
getType());
9647 }
else if (getTypeSizeInBits(LHS->
getType()) >
9648 getTypeSizeInBits(FoundLHS->
getType())) {
9650 FoundLHS = getSignExtendExpr(FoundLHS, LHS->
getType());
9651 FoundRHS = getSignExtendExpr(FoundRHS, LHS->
getType());
9653 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->
getType());
9654 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->
getType());
9660 if (SimplifyICmpOperands(Pred, LHS, RHS))
9663 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
9664 if (FoundLHS == FoundRHS)
9668 if (LHS == FoundRHS || RHS == FoundLHS) {
9669 if (isa<SCEVConstant>(RHS)) {
9679 if (FoundPred == Pred)
9685 if (isa<SCEVConstant>(RHS))
9689 RHS, LHS, FoundLHS, FoundRHS);
9701 (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {
9704 const SCEV *V =
nullptr;
9706 if (isa<SCEVConstant>(FoundLHS)) {
9707 C = cast<SCEVConstant>(FoundLHS);
9710 C = cast<SCEVConstant>(FoundRHS);
9720 getSignedRangeMin(V) : getUnsignedRangeMin(V);
9727 APInt SharperMin = Min + 1;
9775 bool ScalarEvolution::splitBinaryAdd(
const SCEV *Expr,
9779 if (!AE || AE->getNumOperands() != 2)
9782 L = AE->getOperand(0);
9783 R = AE->getOperand(1);
9784 Flags = AE->getNoWrapFlags();
9793 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) {
9794 const auto *LAR = cast<SCEVAddRecExpr>(
Less);
9795 const auto *MAR = cast<SCEVAddRecExpr>(More);
9797 if (LAR->getLoop() != MAR->getLoop())
9802 if (!LAR->isAffine() || !MAR->isAffine())
9805 if (LAR->getStepRecurrence(*
this) != MAR->getStepRecurrence(*
this))
9808 Less = LAR->getStart();
9809 More = MAR->getStart();
9814 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) {
9815 const auto &M = cast<SCEVConstant>(More)->getAPInt();
9816 const auto &L = cast<SCEVConstant>(
Less)->getAPInt();
9821 const SCEV *LLess =
nullptr, *RLess =
nullptr;
9822 const SCEV *LMore =
nullptr, *RMore =
nullptr;
9825 if (splitBinaryAdd(Less, LLess, RLess, Flags))
9826 if ((C1 = dyn_cast<SCEVConstant>(LLess)))
9831 if (splitBinaryAdd(More, LMore, RMore, Flags))
9832 if ((C2 = dyn_cast<SCEVConstant>(LMore)))
9837 if (C1 && C2 && RLess == RMore)
9838 return C2->getAPInt() - C1->
getAPInt();
9843 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(
9845 const SCEV *FoundLHS,
const SCEV *FoundRHS) {
9854 if (!AddRecFoundLHS)
9861 const Loop *L = AddRecFoundLHS->getLoop();
9862 if (L != AddRecLHS->getLoop())
9899 if (!LDiff || !RDiff || *LDiff != *RDiff)
9905 APInt FoundRHSLimit;
9908 FoundRHSLimit = -(*RDiff);
9915 return isAvailableAtLoopEntry(FoundRHS, L) &&
9916 isLoopEntryGuardedByCond(L, Pred, FoundRHS,
9922 const SCEV *FoundLHS,
9924 const PHINode *LPhi =
nullptr, *RPhi =
nullptr;
9928 bool Erased = PendingMerges.erase(LPhi);
9929 assert(Erased &&
"Failed to erase LPhi!");
9933 bool Erased = PendingMerges.erase(RPhi);
9934 assert(Erased &&
"Failed to erase RPhi!");
9940 if (
const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
9941 if (
auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
9942 if (!PendingMerges.insert(Phi).second)
9946 if (
const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
9947 if (
auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
9956 if (!PendingMerges.insert(Phi).second)
9973 assert(LPhi &&
"LPhi should definitely be a SCEVUnknown Phi!");
9977 auto ProvedEasily = [&](
const SCEV *S1,
const SCEV *S2) {
9978 return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
9979 isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) ||
9980 isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
9983 if (RPhi && RPhi->getParent() == LBB) {
9990 const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
9991 if (!ProvedEasily(L, R))
10002 auto *RLoop = RAR->
getLoop();
10004 assert(Predecessor &&
"Loop with AddRec with no predecessor?");
10006 if (!ProvedEasily(L1, RAR->
getStart()))
10008 auto *Latch = RLoop->getLoopLatch();
10009 assert(Latch &&
"Loop with AddRec with no latch?");
10020 if (!dominates(RHS, IncBB))
10023 if (!ProvedEasily(L, RHS))
10032 const SCEV *FoundLHS,
10033 const SCEV *FoundRHS) {
10034 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
10037 if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
10040 return isImpliedCondOperandsHelper(Pred, LHS, RHS,
10041 FoundLHS, FoundRHS) ||
10043 isImpliedCondOperandsHelper(Pred, LHS, RHS,
10044 getNotSCEV(FoundRHS),
10045 getNotSCEV(FoundLHS));
10064 template<
typename MaxExprType>
10066 const SCEV *Candidate) {
10067 const MaxExprType *MaxExpr =
dyn_cast<MaxExprType>(MaybeMaxExpr);
10068 if (!MaxExpr)
return false;
10070 return find(MaxExpr->operands(), Candidate) != MaxExpr->op_end();
10074 template<
typename MaxExprType>
10076 const SCEV *MaybeMinExpr,
10077 const SCEV *Candidate) {
10082 return IsMaxConsistingOf<MaxExprType>(MaybeMaxExpr, SE.
getNotSCEV(Candidate));
10087 const SCEV *LHS,
const SCEV *RHS) {
10121 const SCEV *LHS,
const SCEV *RHS) {
10132 IsMinConsistingOf<SCEVSMaxExpr>(SE, LHS, RHS) ||
10134 IsMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);
10142 IsMinConsistingOf<SCEVUMaxExpr>(SE, LHS, RHS) ||
10144 IsMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
10152 const SCEV *FoundLHS,
10153 const SCEV *FoundRHS,
10156 getTypeSizeInBits(RHS->
getType()) &&
10157 "LHS and RHS have different sizes?");
10159 getTypeSizeInBits(FoundRHS->
getType()) &&
10160 "FoundLHS and FoundRHS have different sizes?");
10174 auto GetOpFromSExt = [&](
const SCEV *S) {
10175 if (
auto *
Ext = dyn_cast<SCEVSignExtendExpr>(S))
10176 return Ext->getOperand();
10183 auto *OrigLHS = LHS;
10184 auto *OrigFoundLHS = FoundLHS;
10185 LHS = GetOpFromSExt(LHS);
10186 FoundLHS = GetOpFromSExt(FoundLHS);
10189 auto IsSGTViaContext = [&](
const SCEV *S1,
const SCEV *S2) {
10192 FoundRHS, Depth + 1);
10195 if (
auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
10201 if (getTypeSizeInBits(LHS->
getType()) != getTypeSizeInBits(RHS->
getType()))
10205 if (!LHSAddExpr->hasNoSignedWrap())
10208 auto *LL = LHSAddExpr->getOperand(0);
10209 auto *LR = LHSAddExpr->getOperand(1);
10210 auto *MinusOne = getNegativeSCEV(getOne(RHS->
getType()));
10213 auto IsSumGreaterThanRHS = [&](
const SCEV *S1,
const SCEV *S2) {
10214 return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
10219 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
10221 }
else if (
auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
10236 if (!isa<ConstantInt>(LR))
10239 auto *Denominator = cast<SCEVConstant>(getSCEV(LR));
10243 auto *Numerator = getExistingSCEV(LL);
10244 if (!Numerator || Numerator->getType() != FoundLHS->
getType())
10252 auto *DTy = Denominator->getType();
10253 auto *FRHSTy = FoundRHS->
getType();
10254 if (DTy->isPointerTy() != FRHSTy->isPointerTy())
10264 auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
10265 auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);
10271 auto *DenomMinusTwo = getMinusSCEV(DenominatorExt,
getConstant(WTy, 2));
10272 if (isKnownNonPositive(RHS) &&
10273 IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
10283 auto *MinusOne = getNegativeSCEV(getOne(WTy));
10284 auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
10286 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
10294 if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1))
10302 const SCEV *LHS,
const SCEV *RHS) {
10303 return isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
10306 isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
10312 const SCEV *FoundLHS,
10313 const SCEV *FoundRHS) {
10348 if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
10357 const SCEV *FoundLHS,
10358 const SCEV *FoundRHS) {
10359 if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
10368 const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
10380 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
10386 return SatisfyingLHSRange.
contains(LHSRange);
10389 bool ScalarEvolution::doesIVOverflowOnLT(
const SCEV *RHS,
const SCEV *Stride,
10390 bool IsSigned,
bool NoWrap) {
10393 if (NoWrap)
return false;
10395 unsigned BitWidth = getTypeSizeInBits(RHS->
getType());
10399 APInt MaxRHS = getSignedRangeMax(RHS);
10401 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));
10404 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
10407 APInt MaxRHS = getUnsignedRangeMax(RHS);
10409 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));
10412 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
10415 bool ScalarEvolution::doesIVOverflowOnGT(
const SCEV *RHS,
const SCEV *Stride,
10416 bool IsSigned,
bool NoWrap) {
10417 if (NoWrap)
return false;
10419 unsigned BitWidth = getTypeSizeInBits(RHS->
getType());
10423 APInt MinRHS = getSignedRangeMin(RHS);
10425 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));
10428 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
10431 APInt MinRHS = getUnsignedRangeMin(RHS);
10433 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));
10436 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
10439 const SCEV *ScalarEvolution::computeBECount(
const SCEV *Delta,
const SCEV *Step,
10442 Delta = Equality ? getAddExpr(Delta, Step)
10443 : getAddExpr(Delta, getMinusSCEV(Step, One));
10444 return getUDivExpr(Delta, Step);
10447 const SCEV *ScalarEvolution::computeMaxBECountForLT(
const SCEV *Start,
10448 const SCEV *Stride,
10453 assert(!isKnownNonPositive(Stride) &&
10454 "Stride is expected strictly positive!");
10457 const SCEV *MaxBECount;
10459 IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start);
10461 APInt StrideForMaxBECount =
10462 IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);
10469 APInt One(BitWidth, 1, IsSigned);
10474 APInt Limit = MaxValue - (StrideForMaxBECount - 1);
10483 MaxBECount = computeBECount(
getConstant(MaxEnd - MinStart) ,
10490 ScalarEvolution::ExitLimit
10491 ScalarEvolution::howManyLessThans(
const SCEV *LHS,
const SCEV *RHS,
10492 const Loop *L,
bool IsSigned,
10493 bool ControlsExit,
bool AllowPredicates) {
10497 bool PredicatedIV =
false;
10499 if (!IV && AllowPredicates) {
10503 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
10504 PredicatedIV =
true;
10509 return getCouldNotCompute();
10511 bool NoWrap = ControlsExit &&
10519 if (!PositiveStride) {
10562 if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) ||
10563 !loopHasNoSideEffects(L))
10564 return getCouldNotCompute();
10565 }
else if (!Stride->
isOne() &&
10566 doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
10571 return getCouldNotCompute();
10576 const SCEV *End = RHS;
10583 const SCEV *MaxBECount = computeMaxBECountForLT(
10584 Start, Stride, RHS, getTypeSizeInBits(LHS->
getType()), IsSigned);
10585 return ExitLimit(getCouldNotCompute() , MaxBECount,
10586 false , Predicates);
10592 const SCEV *BECountIfBackedgeTaken =
10593 computeBECount(getMinusSCEV(End, Start), Stride,
false);
10601 const SCEV *BECount;
10602 if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS))
10603 BECount = BECountIfBackedgeTaken;
10605 End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);
10606 BECount = computeBECount(getMinusSCEV(End, Start), Stride,
false);
10609 const SCEV *MaxBECount;
10610 bool MaxOrZero =
false;
10611 if (isa<SCEVConstant>(BECount))
10612 MaxBECount = BECount;
10613 else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) {
10617 MaxBECount = BECountIfBackedgeTaken;
10620 MaxBECount = computeMaxBECountForLT(
10621 Start, Stride, RHS, getTypeSizeInBits(LHS->
getType()), IsSigned);
10624 if (isa<SCEVCouldNotCompute>(MaxBECount) &&
10625 !isa<SCEVCouldNotCompute>(BECount))
10626 MaxBECount =
getConstant(getUnsignedRangeMax(BECount));
10628 return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates);
10631 ScalarEvolution::ExitLimit
10632 ScalarEvolution::howManyGreaterThans(
const SCEV *LHS,
const SCEV *RHS,
10633 const Loop *L,
bool IsSigned,
10634 bool ControlsExit,
bool AllowPredicates) {
10638 return getCouldNotCompute();
10641 if (!IV && AllowPredicates)
10645 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
10649 return getCouldNotCompute();
10651 bool NoWrap = ControlsExit &&
10658 return getCouldNotCompute();
10664 if (!Stride->
isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
10665 return getCouldNotCompute();
10671 const SCEV *End = RHS;
10672 if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS))
10673 End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);
10675 const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride,
false);
10677 APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
10678 : getUnsignedRangeMax(Start);
10680 APInt MinStride = IsSigned ? getSignedRangeMin(Stride)
10681 : getUnsignedRangeMin(Stride);
10683 unsigned BitWidth = getTypeSizeInBits(LHS->
getType());
10695 const SCEV *MaxBECount = getCouldNotCompute();
10696 if (isa<SCEVConstant>(BECount))
10697 MaxBECount = BECount;
10699 MaxBECount = computeBECount(
getConstant(MaxStart - MinEnd),
10702 if (isa<SCEVCouldNotCompute>(MaxBECount))
10703 MaxBECount = BECount;
10705 return ExitLimit(BECount, MaxBECount,
false, Predicates);
10714 if (
const SCEVConstant *
SC = dyn_cast<SCEVConstant>(getStart()))
10715 if (!
SC->getValue()->isZero()) {
10717 Operands[0] = SE.
getZero(
SC->getType());
10719 getNoWrapFlags(
FlagNW));
10720 if (
const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted))
10721 return ShiftedAddRec->getNumIterationsInRange(
10729 if (
any_of(operands(), [](
const SCEV *
Op) {
return !isa<SCEVConstant>(
Op); }))
10753 APInt ExitVal = (End + A).udiv(A);
10760 if (Range.
contains(Val->getValue()))
10767 "Linear scev computation is off in a bad way!");
10771 if (isQuadratic()) {
10781 assert(getNumOperands() > 1 &&
"AddRec with zero step?");
10791 for (
unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
10798 assert(!Last->
isZero() &&
"Recurrency with zero step?");
10800 return cast<SCEVAddRecExpr>(SE.
getAddRecExpr(Ops, getLoop(),
10807 if (
const auto *SU = dyn_cast<SCEVUnknown>(S))
10808 return isa<UndefValue>(SU->getValue());
10809 else if (
const auto *
SC = dyn_cast<SCEVConstant>(S))
10810 return isa<UndefValue>(
SC->getValue());
10818 struct SCEVCollectStrides {
10823 : SE(SE), Strides(S) {}
10825 bool follow(
const SCEV *S) {
10831 bool isDone()
const {
return false; }
10835 struct SCEVCollectTerms {
10840 bool follow(
const SCEV *S) {
10841 if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) ||
10842 isa<SCEVSignExtendExpr>(S)) {
10854 bool isDone()
const {
return false; }
10858 struct SCEVHasAddRec {
10859 bool &ContainsAddRec;
10861 SCEVHasAddRec(
bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
10862 ContainsAddRec =
false;
10865 bool follow(
const SCEV *S) {
10866 if (isa<SCEVAddRecExpr>(S)) {
10867 ContainsAddRec =
true;
10877 bool isDone()
const {
return false; }
10892 struct SCEVCollectAddRecMultiplies {
10897 : Terms(T), SE(SE) {}
10899 bool follow(
const SCEV *S) {
10900 if (
auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
10901 bool HasAddRec =
false;
10903 for (
auto Op : Mul->operands()) {
10905 if (Unknown && !isa<CallInst>(Unknown->
getValue())) {
10907 }
else if (Unknown) {
10910 bool ContainsAddRec;
10911 SCEVHasAddRec ContiansAddRec(ContainsAddRec);
10913 HasAddRec |= ContainsAddRec;
10916 if (Operands.
size() == 0)
10931 bool isDone()
const {
return false; }
10943 SCEVCollectStrides StrideCollector(*
this, Strides);
10947 dbgs() <<
"Strides:\n";
10948 for (
const SCEV *S : Strides)
10949 dbgs() << *S <<
"\n";
10952 for (
const SCEV *S : Strides) {
10953 SCEVCollectTerms TermCollector(Terms);
10958 dbgs() <<
"Terms:\n";
10959 for (
const SCEV *
T : Terms)
10960 dbgs() << *
T <<
"\n";
10963 SCEVCollectAddRecMultiplies MulCollector(Terms, *
this);
10970 int Last = Terms.
size() - 1;
10971 const SCEV *Step = Terms[Last];
10975 if (
const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) {
10977 for (
const SCEV *
Op : M->operands())
10978 if (!isa<SCEVConstant>(
Op))
10988 for (
const SCEV *&Term : Terms) {
10991 SCEVDivision::divide(SE, Term, Step, &Q, &R);
11002 remove_if(Terms, [](
const SCEV *E) {
return isa<SCEVConstant>(
E); }),
11005 if (Terms.size() > 0)
11015 for (
const SCEV *
T : Terms)
11023 if (
const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S))
11024 return Expr->getNumOperands();
11029 if (isa<SCEVConstant>(T))
11032 if (isa<SCEVUnknown>(T))
11035 if (
const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) {
11037 for (
const SCEV *
Op : M->operands())
11038 if (!isa<SCEVConstant>(
Op))
11051 Ty =
Store->getValueOperand()->getType();
11053 Ty =
Load->getType();
11058 return getSizeOfExpr(ETy, Ty);
11063 const SCEV *ElementSize) {
11064 if (Terms.
size() < 1 || !ElementSize)
11073 dbgs() <<
"Terms:\n";
11074 for (
const SCEV *
T : Terms)
11075 dbgs() << *
T <<
"\n";
11089 for (
const SCEV *&Term : Terms) {
11091 SCEVDivision::divide(*
this, Term, ElementSize, &Q, &R);
11099 for (
const SCEV *
T : Terms)
11104 dbgs() <<
"Terms after sorting:\n";
11105 for (
const SCEV *
T : NewTerms)
11106 dbgs() << *
T <<
"\n";
11118 dbgs() <<
"Sizes:\n";
11119 for (
const SCEV *S : Sizes)
11120 dbgs() << *S <<
"\n";
11131 if (
auto *AR = dyn_cast<SCEVAddRecExpr>(Expr))
11135 const SCEV *Res = Expr;
11136 int Last = Sizes.
size() - 1;
11137 for (
int i = Last; i >= 0; i--) {
11139 SCEVDivision::divide(*
this, Res, Sizes[i], &Q, &R);
11142 dbgs() <<
"Res: " << *Res <<
"\n";
11143 dbgs() <<
"Sizes[i]: " << *Sizes[i] <<
"\n";
11144 dbgs() <<
"Res divided by Sizes[i]:\n";
11145 dbgs() <<
"Quotient: " << *Q <<
"\n";
11146 dbgs() <<
"Remainder: " << *R <<
"\n";
11156 if (isa<SCEVAddRecExpr>(R)) {
11157 Subscripts.
clear();
11176 dbgs() <<
"Subscripts:\n";
11177 for (
const SCEV *S : Subscripts)
11178 dbgs() << *S <<
"\n";
11233 const SCEV *ElementSize) {
11236 collectParametricTerms(Expr, Terms);
11242 findArrayDimensions(Terms, Sizes, ElementSize);
11248 computeAccessFunctions(Expr, Subscripts, Sizes);
11250 if (Subscripts.
empty())
11254 dbgs() <<
"succeeded to delinearize " << *Expr <<
"\n";
11255 dbgs() <<
"ArrayDecl[UnknownSize]";
11256 for (
const SCEV *S : Sizes)
11257 dbgs() <<
"[" << *S <<
"]";
11259 dbgs() <<
"\nArrayRef";
11260 for (
const SCEV *S : Subscripts)
11261 dbgs() <<
"[" << *S <<
"]";
11270 void ScalarEvolution::SCEVCallbackVH::deleted() {
11271 assert(SE &&
"SCEVCallbackVH called with a null ScalarEvolution!");
11272 if (
PHINode *PN = dyn_cast<PHINode>(getValPtr()))
11273 SE->ConstantEvolutionLoopExitValue.erase(PN);
11278 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(
Value *V) {
11279 assert(SE &&
"SCEVCallbackVH called with a null ScalarEvolution!");
11284 Value *Old = getValPtr();
11287 while (!Worklist.empty()) {
11288 User *U = Worklist.pop_back_val();
11293 if (!Visited.
insert(U).second)
11295 if (
PHINode *PN = dyn_cast<PHINode>(U))
11296 SE->ConstantEvolutionLoopExitValue.erase(PN);
11301 if (
PHINode *PN = dyn_cast<PHINode>(Old))
11302 SE->ConstantEvolutionLoopExitValue.erase(PN);
11317 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI),
11319 LoopDispositions(64), BlockDispositions(64) {
11332 HasGuards = GuardDecl && !GuardDecl->use_empty();
11336 : F(
Arg.F), HasGuards(
Arg.HasGuards), TLI(
Arg.TLI), AC(
Arg.AC), DT(
Arg.DT),
11337 LI(
Arg.LI), CouldNotCompute(
std::move(
Arg.CouldNotCompute)),
11338 ValueExprMap(
std::move(
Arg.ValueExprMap)),
11339 PendingLoopPredicates(
std::move(
Arg.PendingLoopPredicates)),
11340 PendingPhiRanges(
std::move(
Arg.PendingPhiRanges)),
11341 PendingMerges(
std::move(
Arg.PendingMerges)),
11342 MinTrailingZerosCache(
std::move(
Arg.MinTrailingZerosCache)),
11343 BackedgeTakenCounts(
std::move(
Arg.BackedgeTakenCounts)),
11344 PredicatedBackedgeTakenCounts(
11345 std::move(
Arg.PredicatedBackedgeTakenCounts)),
11346 ConstantEvolutionLoopExitValue(
11347 std::move(
Arg.ConstantEvolutionLoopExitValue)),
11348 ValuesAtScopes(
std::move(
Arg.ValuesAtScopes)),
11349 LoopDispositions(
std::move(
Arg.LoopDispositions)),
11350 LoopPropertiesCache(
std::move(
Arg.LoopPropertiesCache)),
11351 BlockDispositions(
std::move(
Arg.BlockDispositions)),
11352 UnsignedRanges(
std::move(
Arg.UnsignedRanges)),
11353 SignedRanges(
std::move(
Arg.SignedRanges)),
11354 UniqueSCEVs(
std::move(
Arg.UniqueSCEVs)),
11355 UniquePreds(
std::move(
Arg.UniquePreds)),
11356 SCEVAllocator(
std::move(
Arg.SCEVAllocator)),
11357 LoopUsers(
std::move(
Arg.LoopUsers)),
11358 PredicatedSCEVRewrites(
std::move(
Arg.PredicatedSCEVRewrites)),
11359 FirstUnknown(
Arg.FirstUnknown) {
11360 Arg.FirstUnknown =
nullptr;
11369 Tmp->~SCEVUnknown();
11371 FirstUnknown =
nullptr;
11373 ExprValueMap.
clear();
11374 ValueExprMap.
clear();
11379 for (
auto &BTCI : BackedgeTakenCounts)
11380 BTCI.second.clear();
11381 for (
auto &BTCI : PredicatedBackedgeTakenCounts)
11382 BTCI.second.clear();
11384 assert(PendingLoopPredicates.empty() &&
"isImpliedCond garbage");
11385 assert(PendingPhiRanges.empty() &&
"getRangeRef garbage");
11386 assert(PendingMerges.empty() &&
"isImpliedViaMerge garbage");
11387 assert(!WalkingBEDominatingConds &&
"isLoopBackedgeGuardedByCond garbage!");
11388 assert(!ProvingSplitPredicate &&
"ProvingSplitPredicate garbage!");
11402 L->getHeader()->printAsOperand(OS,
false);
11406 L->getExitBlocks(ExitBlocks);
11407 if (ExitBlocks.
size() != 1)
11408 OS <<
"<multiple exits> ";
11413 OS <<
"Unpredictable backedge-taken count. ";
11418 L->getHeader()->printAsOperand(OS,
false);
11424 OS <<
", actual taken count either this or zero.";
11426 OS <<
"Unpredictable max backedge-taken count. ";
11431 L->getHeader()->printAsOperand(OS,
false);
11436 if (!isa<SCEVCouldNotCompute>(PBT)) {
11437 OS <<
"Predicated backedge-taken count is " << *PBT <<
"\n";
11438 OS <<
" Predicates:\n";
11441 OS <<
"Unpredictable predicated backedge-taken count. ";
11447 L->getHeader()->printAsOperand(OS,
false);
11458 return "Invariant";
11460 return "Computable";
11474 OS <<
"Classifying expressions for: ";
11483 if (!isa<SCEVCouldNotCompute>(SV)) {
11490 const Loop *L = LI.getLoopFor(I.getParent());
11496 if (!isa<SCEVCouldNotCompute>(AtUse)) {
11505 OS <<
"\t\t" "Exits: ";
11508 OS <<
"<<Unknown>>";
11516 OS <<
"\t\t" "LoopDispositions: { ";
11522 Iter->getHeader()->printAsOperand(OS,
false);
11530 OS <<
"\t\t" "LoopDispositions: { ";
11536 InnerL->getHeader()->printAsOperand(OS,
false);
11546 OS <<
"Determining loop execution counts for: ";
11555 auto &Values = LoopDispositions[S];
11556 for (
auto &V : Values) {
11557 if (V.getPointer() == L)
11562 auto &Values2 = LoopDispositions[S];
11563 for (
auto &V :
make_range(Values2.rbegin(), Values2.rend())) {
11564 if (V.getPointer() == L) {
11573 ScalarEvolution::computeLoopDisposition(
const SCEV *S,
const Loop *L) {
11574 switch (static_cast<SCEVTypes>(S->
getSCEVType())) {
11596 " dominate the contained loop's header?");
11615 bool HasVarying =
false;
11616 for (
auto *
Op : cast<SCEVNAryExpr>(S)->operands()) {
11641 if (
auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
11660 auto &Values = BlockDispositions[S];
11661 for (
auto &V : Values) {
11662 if (V.getPointer() == BB)
11667 auto &Values2 = BlockDispositions[S];
11668 for (
auto &V :
make_range(Values2.rbegin(), Values2.rend())) {
11669 if (V.getPointer() == BB) {
11678 ScalarEvolution::computeBlockDisposition(
const SCEV *S,
const BasicBlock *BB) {
11679 switch (static_cast<SCEVTypes>(S->
getSCEVType())) {
11703 bool Proper =
true;
11727 dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
11753 bool ScalarEvolution::ExitLimit::hasOperand(
const SCEV *S)
const {
11754 auto IsS = [&](
const SCEV *
X) {
return S ==
X; };
11755 auto ContainsS = [&](
const SCEV *
X) {
11758 return ContainsS(ExactNotTaken) || ContainsS(MaxNotTaken);
11762 ScalarEvolution::forgetMemoizedResults(
const SCEV *S) {
11763 ValuesAtScopes.erase(S);
11764 LoopDispositions.erase(S);
11765 BlockDispositions.erase(S);
11766 UnsignedRanges.erase(S);
11767 SignedRanges.erase(S);
11768 ExprValueMap.
erase(S);
11769 HasRecMap.
erase(S);
11770 MinTrailingZerosCache.erase(S);
11772 for (
auto I = PredicatedSCEVRewrites.begin();
11773 I != PredicatedSCEVRewrites.end();) {
11774 std::pair<const SCEV *, const Loop *> Entry = I->first;
11775 if (Entry.first == S)
11776 PredicatedSCEVRewrites.erase(I++);
11781 auto RemoveSCEVFromBackedgeMap =
11783 for (
auto I = Map.begin(), E = Map.end(); I !=
E;) {
11784 BackedgeTakenInfo &BEInfo = I->second;
11785 if (BEInfo.hasOperand(S,
this)) {
11793 RemoveSCEVFromBackedgeMap(BackedgeTakenCounts);
11794 RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts);
11798 ScalarEvolution::getUsedLoops(
const SCEV *S,
11800 struct FindUsedLoops {
11802 : LoopsUsed(LoopsUsed) {}
11804 bool follow(
const SCEV *S) {
11805 if (
auto *AR = dyn_cast<SCEVAddRecExpr>(S))
11810 bool isDone()
const {
return false; }
11813 FindUsedLoops
F(LoopsUsed);
11817 void ScalarEvolution::addToLoopUseLists(
const SCEV *S) {
11819 getUsedLoops(S, LoopsUsed);
11820 for (
auto *L : LoopsUsed)
11821 LoopUsers[L].push_back(S);
11847 SCEVMapper SCM(SE2);
11849 while (!LoopStack.empty()) {
11850 auto *L = LoopStack.pop_back_val();
11851 LoopStack.insert(LoopStack.end(), L->
begin(), L->
end());
11853 auto *CurBECount = SCM.visit(
11884 auto *ConstantDelta =
11887 if (ConstantDelta && ConstantDelta->getAPInt() != 0) {
11888 dbgs() <<
"Trip Count Changed!\n";
11889 dbgs() <<
"Old: " << *CurBECount <<
"\n";
11890 dbgs() <<
"New: " << *NewBECount <<
"\n";
11891 dbgs() <<
"Delta: " << *ConstantDelta <<
"\n";
11926 "Scalar Evolution Analysis",
false,
true)
11934 char ScalarEvolutionWrapperPass::ID = 0;
11942 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
11943 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
11944 getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
11945 getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
11974 "Type mismatch between LHS and RHS");
11979 void *IP =
nullptr;
11980 if (
const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
11984 UniquePreds.InsertNode(Eq, IP);
11996 void *IP =
nullptr;
11997 if (
const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
11999 auto *OF =
new (SCEVAllocator)
12001 UniquePreds.InsertNode(OF, IP);
12021 SCEVPredicateRewriter
Rewriter(L, SE, NewPreds, Pred);
12022 return Rewriter.visit(S);
12028 for (
auto *Pred : ExprPreds)
12029 if (
const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred))
12030 if (IPred->getLHS() == Expr)
12031 return IPred->getRHS();
12033 return convertToAddRecWithPreds(Expr);
12039 if (AR && AR->getLoop() == L && AR->isAffine()) {
12042 const SCEV *Step = AR->getStepRecurrence(SE);
12047 AR->getNoWrapFlags());
12055 if (AR && AR->getLoop() == L && AR->isAffine()) {
12058 const SCEV *Step = AR->getStepRecurrence(SE);
12063 AR->getNoWrapFlags());
12077 return Pred && Pred->
implies(P);
12086 return addOverflowAssumption(A);
12096 if (!isa<PHINode>(Expr->
getValue()))
12100 if (!PredicatedRewrite)
12102 for (
auto *P : PredicatedRewrite->second){
12104 if (
auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) {
12105 auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr());
12109 if (!addOverflowAssumption(P))
12112 return PredicatedRewrite->first;
12124 return SCEVPredicateRewriter::rewrite(S, L, *
this,
nullptr, &Preds);
12131 S = SCEVPredicateRewriter::rewrite(S, L, *
this, &TransformPreds,
nullptr);
12139 for (
auto *
P : TransformPreds)
12148 : FastID(ID), Kind(Kind) {}
12154 assert(LHS != RHS &&
"LHS and RHS are the same SCEV");
12163 return Op->LHS == LHS &&
Op->RHS == RHS;
12171 OS.
indent(Depth) <<
"Equal predicate: " << *LHS <<
" == " << *RHS <<
"\n";
12184 return Op &&
Op->AR == AR &&
setFlags(Flags,
Op->Flags) == Flags;
12220 if (Step->getValue()->getValue().isNonNegative())
12224 return ImpliedFlags;
12238 auto I = SCEVToPreds.
find(Expr);
12239 if (
I == SCEVToPreds.
end())
12245 if (
const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
12246 return all_of(Set->Preds,
12249 auto ScevPredsIt = SCEVToPreds.
find(N->
getExpr());
12250 if (ScevPredsIt == SCEVToPreds.
end())
12252 auto &SCEVPreds = ScevPredsIt->second;
12254 return any_of(SCEVPreds,
12261 for (
auto Pred : Preds)
12262 Pred->print(OS, Depth);
12266 if (
const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) {
12267 for (
auto Pred : Set->Preds)
12276 assert(Key &&
"Only SCEVUnionPredicate doesn't have an " 12277 " associated expression!");
12279 SCEVToPreds[
Key].push_back(N);
12280 Preds.push_back(N);
12289 RewriteEntry &Entry = RewriteMap[Expr];
12292 if (Entry.second && Generation == Entry.first)
12293 return Entry.second;
12298 Expr = Entry.second;
12301 Entry = {Generation, NewSCEV};
12307 if (!BackedgeCount) {
12312 return BackedgeCount;
12319 updateGeneration();
12326 void PredicatedScalarEvolution::updateGeneration() {
12328 if (++Generation == 0) {
12329 for (
auto &II : RewriteMap) {
12330 const SCEV *Rewritten = II.second.second;
12339 const auto *AR = cast<SCEVAddRecExpr>(Expr);
12347 auto II = FlagsMap.insert({V, Flags});
12355 const auto *AR = cast<SCEVAddRecExpr>(Expr);
12360 auto II = FlagsMap.find(V);
12362 if (II != FlagsMap.end())
12376 for (
auto *
P : NewPreds)
12379 updateGeneration();
12380 RewriteMap[SE.
getSCEV(V)] = {Generation, New};
12386 : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds),
12387 Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
12388 for (
const auto &
I : Init.FlagsMap)
12389 FlagsMap.insert(
I);
12395 for (
auto &
I : *BB) {
12400 auto II = RewriteMap.find(Expr);
12402 if (II == RewriteMap.end())
12406 if (II->second.second == Expr)
12409 OS.
indent(Depth) <<
"[PSE]" <<
I <<
":\n";
12410 OS.
indent(Depth + 2) << *Expr <<
"\n";
12411 OS.
indent(Depth + 2) <<
"--> " << *II->second.second <<
"\n";
12419 bool ScalarEvolution::matchURem(
const SCEV *Expr,
const SCEV *&LHS,
12420 const SCEV *&RHS) {
12422 if (
Add ==
nullptr ||
Add->getNumOperands() != 2)
12425 const SCEV *A =
Add->getOperand(1);
12428 if (Mul ==
nullptr)
12431 const auto MatchURemWithDivisor = [&](
const SCEV *
B) {
12433 if (Expr == getURemExpr(A,
B)) {
12442 if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0)))
12443 return MatchURemWithDivisor(Mul->getOperand(1)) ||
12444 MatchURemWithDivisor(Mul->getOperand(2));
12447 if (Mul->getNumOperands() == 2)
12448 return MatchURemWithDivisor(Mul->getOperand(1)) ||
12449 MatchURemWithDivisor(Mul->getOperand(0)) ||
12450 MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) ||
12451 MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0)));
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
APInt abs() const
Get the absolute value;.
static Optional< APInt > MinOptional(Optional< APInt > X, Optional< APInt > Y)
Helper function to compare optional APInts: (a) if X and Y both exist, return min(X, Y), (b) if neither X nor Y exist, return None, (c) if exactly one of X and Y exists, return that value.
const SCEV * getTruncateOrNoop(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
bool isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Returns true if the given value is known be positive (i.e.
virtual const SCEV * getExpr() const =0
Returns the SCEV to which this predicate applies, or nullptr if this is a SCEVUnionPredicate.
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, OptimizationRemarkEmitter *ORE=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
void AddPointer(const void *Ptr)
Add* - Add various data types to Bit data.
unsigned getSmallConstantTripCount(const Loop *L)
Returns the maximum trip count of the loop if it is a single-exit loop and we can compute a small max...
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
IntegerType * getType() const
getType - Specialize the getType() method to always return an IntegerType, which reduces the amount o...
This routine provides some synthesis utilities to produce sequences of values.
A parsed version of the target data layout string in and methods for querying it. ...
static ConstantInt * getFalse(LLVMContext &Context)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
This class is the base class for the comparison instructions.
The SCEV properly dominates the block.
bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Trigger the invalidation of some other analysis pass if not already handled and return whether it was...
bool isQuadratic() const
Return true if this represents an expression A + B*x + C*x^2 where A, B and C are loop invariant valu...
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
static IntegerType * getInt1Ty(LLVMContext &C)
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
uint64_t getZExtValue() const
Get zero extended value.
APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
APInt sext(unsigned width) const
Sign extend to a new width.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
static Optional< APInt > SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n iterations.
SCEVCastExpr(const FoldingSetNodeIDRef ID, unsigned SCEVTy, const SCEV *op, Type *ty)
unsigned getValueID() const
Return an ID for the concrete type of this object.
static Type * getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1)
ConstantRange unionWith(const ConstantRange &CR) const
Return the range that results from the union of this range with another range.
unsigned getSmallConstantTripMultiple(const Loop *L)
Returns the largest constant divisor of the trip count of the loop if it is a single-exit loop and we...
static bool isLoopInvariant(Value *V, const Loop *L, const DominatorTree *DT)
Perform a quick domtree based check for loop invariance assuming that V is used within the loop...
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
const SCEV * getConstant(ConstantInt *V)
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
This class represents lattice values for constants.
const APInt & getUpper() const
Return the upper value for this range.
void delinearize(const SCEV *Expr, SmallVectorImpl< const SCEV *> &Subscripts, SmallVectorImpl< const SCEV *> &Sizes, const SCEV *ElementSize)
Split this SCEVAddRecExpr into two vectors of SCEVs representing the subscripts and sizes of an array...
static const SCEV * removeConstantFactors(ScalarEvolution &SE, const SCEV *T)
Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
A Module instance is used to store all the information related to an LLVM module. ...
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant *> IdxList, bool InBounds=false, Optional< unsigned > InRangeIndex=None, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
bool isSized(SmallPtrSetImpl< Type *> *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
void dump() const
This method is used for debugging.
SCEVUnionPredicate()
Union predicates don't get cached so create a dummy set ID for it.
static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, Value *&C, Value *&LHS, Value *&RHS)
virtual bool implies(const SCEVPredicate *N) const =0
Returns true if this predicate implies N.
size_t getNumOperands() const
unsigned getLoopDepth(const BlockT *BB) const
Return the loop nesting level of the specified block.
unsigned getLoopDepth() const
Return the nesting level of this loop.
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow)
Compute the result of "n choose k", the binomial coefficient.
void push_back(const T &Elt)
const SCEV * getPointerBase(const SCEV *V)
Transitively follow the chain of pointer-type operands until reaching a SCEV that does not have a sin...
The main scalar evolution driver.
APInt zext(unsigned width) const
Zero extend to a new width.
bool slt(const APInt &RHS) const
Signed less than comparison.
bool isZero() const
Return true if the expression is a constant zero.
This class represents a function call, abstracting a target machine's calling convention.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Get a value with low bits set.
IncrementWrapFlags
Similar to SCEV::NoWrapFlags, but with slightly different semantics for FlagNUSW. ...
An immutable pass that tracks lazily created AssumptionCache objects.
Value * getCondition() const
bool isKnownNonNegative(const SCEV *S)
Test if the given expression is known to be non-negative.
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
const Value * getTrueValue() const
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass...
ConstantRange truncate(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly smaller than the current typ...
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
FoldingSetNodeIDRef Intern(BumpPtrAllocator &Allocator) const
Intern - Copy this node's data to a memory region allocated from the given allocator and return a Fol...
static LLVM_NODISCARD SCEV::NoWrapFlags setFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OnFlags)
bool isAvailableAtLoopEntry(const SCEV *S, const Loop *L)
Determine if the SCEV can be evaluated at loop's entry.
A cache of @llvm.assume calls within a function.
static void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
bool properlyDominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV properly dominate the specified basic block...
uint32_t GetMinTrailingZeros(const SCEV *S)
Determine the minimum number of zero bits that S is guaranteed to end in (at every loop iteration)...
LLVM_NODISCARD detail::scope_exit< typename std::decay< Callable >::type > make_scope_exit(Callable &&F)
LLVMContext & getContext() const
All values hold a context through their type.
This class represents a truncation of an integer value to a smaller integer value.
uint64_t getTypeSizeInBits(Type *Ty) const
Return the size in bits of the specified type, for which isSCEVable must return true.
APInt getSignedRangeMin(const SCEV *S)
Determine the min of the signed range for a particular SCEV.
const Use & getOperandUse(unsigned i) const
bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly...
static ConstantInt * EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, ScalarEvolution &SE)
BasicBlock * getSuccessor(unsigned i) const
APInt trunc(unsigned width) const
Truncate to new width.
bool isMonotonicPredicate(const SCEVAddRecExpr *LHS, ICmpInst::Predicate Pred, bool &Increasing)
Return true if, for all loop invariant X, the predicate "LHS `Pred` X" is monotonically increasing or...
static const SCEV * getSignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
STATISTIC(NumFunctions, "Total number of functions")
ConstantRange udiv(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an unsigned division of a value in...
bool isLoopBackedgeGuardedByCond(const Loop *L, ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether the backedge of the loop is protected by a conditional between LHS and RHS...
Analysis pass which computes a DominatorTree.
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
block Block Frequency true
An instruction for reading from memory.
member_iterator unionSets(const ElemTy &V1, const ElemTy &V2)
union - Merge the two equivalence sets for the specified values, inserting them if they do not alread...
static std::pair< const SCEV *, ConstantInt * > splitAddExpr(const SCEV *S)
Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}.
const SCEV * getOperand() const
Value * getCondition() const
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
static Constant * EvaluateExpression(Value *V, const Loop *L, DenseMap< Instruction *, Constant *> &Vals, const DataLayout &DL, const TargetLibraryInfo *TLI)
EvaluateExpression - Given an expression that passes the getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node in the loop has the value PHIVal.
static ConstantRange makeAllowedICmpRegion(CmpInst::Predicate Pred, const ConstantRange &Other)
Produce the smallest range such that all values that may satisfy the given predicate with any value c...
An object of this class is returned by queries that could not be answered.
static cl::opt< bool > VerifySCEVMap("verify-scev-maps", cl::Hidden, cl::desc("Verify no dangling value in ScalarEvolution's " "ExprValueMap (slow)"))
This defines the Use class.
void reserve(size_type N)
static bool HasSameValue(const SCEV *A, const SCEV *B)
SCEV structural equivalence is usually sufficient for testing whether two expressions are equal...
bool hasNoSignedWrap() const
bool isKnownOnEveryIteration(ICmpInst::Predicate Pred, const SCEVAddRecExpr *LHS, const SCEV *RHS)
Test if the condition described by Pred, LHS, RHS is known to be true on every iteration of the loop ...
void print(raw_ostream &OS) const
Print out the bounds to a stream.
INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution", "Scalar Evolution Analysis", false, true) INITIALIZE_PASS_END(ScalarEvolutionWrapperPass
APInt GreatestCommonDivisor(APInt A, APInt B)
Compute GCD of two unsigned APInt values.
bool hasNoSignedWrap() const
Determine whether the no signed wrap flag is set.
const SCEV * getUMaxFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS)
Promote the operands to the wider of the types using zero-extension, and then perform a umax operatio...
const SCEV * getExpr() const override
Returns the SCEV to which this predicate applies, or nullptr if this is a SCEVUnionPredicate.
static Optional< ConstantRange > GetRangeFromMetadata(Value *V)
Helper method to assign a range to V from metadata present in the IR.
const SCEVAddRecExpr * getPostIncExpr(ScalarEvolution &SE) const
Return an expression representing the value of this expression one iteration of the loop ahead...
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
ConstantInt * findCaseDest(BasicBlock *BB)
Finds the unique case value for a given successor.
This is the base class for unary cast operator classes.
bool propagatesFullPoison(const Instruction *I)
Return true if this function can prove that I is guaranteed to yield full-poison (all bits poison) if...
unsigned getBitWidth() const
Return the number of bits in the APInt.
unsigned getSmallConstantMaxTripCount(const Loop *L)
Returns the upper bound of the loop trip count as a normal unsigned value.
APInt getSignedRangeMax(const SCEV *S)
Determine the max of the signed range for a particular SCEV.
bool isOffsetOf(Type *&STy, Constant *&FieldNo) const
static Optional< std::tuple< APInt, APInt, APInt, APInt, unsigned > > GetQuadraticEquation(const SCEVAddRecExpr *AddRec)
For a given quadratic addrec, generate coefficients of the corresponding quadratic equation...
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
iterator begin()
Instruction iterator methods.
bool isIdenticalTo(const Instruction *I) const
Return true if the specified instruction is exactly identical to the current one. ...
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
bool isKnownNonPositive(const SCEV *S)
Test if the given expression is known to be non-positive.
Predicate getSignedPredicate()
For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE, SLT->Failed assert.
ConstantRange umax(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an unsigned maximum of a value in ...
bool isSingleEdge() const
Check if this is the only edge between Start and End.
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
const SCEV * getZero(Type *Ty)
Return a SCEV for the constant 0 of a specific type.
bool isKnownViaInduction(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
We'd like to check the predicate on every iteration of the most dominated loop between loops used in ...
The SCEV is loop-invariant.
SI optimize exec mask operations pre RA
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
static cl::opt< bool > VerifySCEV("verify-scev", cl::Hidden, cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"))
unsigned countTrailingZeros() const
Count the number of trailing zero bits.
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
static LLVM_NODISCARD SCEVWrapPredicate::IncrementWrapFlags setFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OnFlags)
bool match(Val *V, const Pattern &P)
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
#define INITIALIZE_PASS_DEPENDENCY(depName)
bool isVolatile() const
Return true if this is a load from a volatile memory location.
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
ArrayRef< const SCEVPredicate * > getPredicatesForExpr(const SCEV *Expr)
Returns a reference to a vector containing all predicates which apply to Expr.
static Constant * getIntegerCast(Constant *C, Type *Ty, bool isSigned)
Create a ZExt, Bitcast or Trunc for integer -> integer casts.
const SCEVPredicate * getWrapPredicate(const SCEVAddRecExpr *AR, SCEVWrapPredicate::IncrementWrapFlags AddedFlags)
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, const SCEVConstant *ConstantTerm, const SCEVAddExpr *WholeAddExpr)
void collectParametricTerms(const SCEV *Expr, SmallVectorImpl< const SCEV *> &Terms)
Collect parametric terms occurring in step expressions (first step of delinearization).
void setBit(unsigned BitPosition)
Set a given bit to 1.
bool isAlignOf(Type *&AllocTy) const
StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
This class represents the LLVM 'select' instruction.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
const Loop * getLoop() const
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
APInt shl(unsigned shiftAmt) const
Left-shift function.
const SCEV * getNumIterationsInRange(const ConstantRange &Range, ScalarEvolution &SE) const
Return the number of iterations of this loop that produce values in the specified constant range...
Class to represent struct types.
void verifyAnalysis() const override
verifyAnalysis() - This member can be implemented by an analysis pass to check state of analysis infor...
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
A Use represents the edge between a Value definition and its users.
bool isSizeOf(Type *&AllocTy) const
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
static void PushDefUseChildren(Instruction *I, SmallVectorImpl< Instruction *> &Worklist)
Push users of the given Instruction onto the given Worklist.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
bool isIntegerTy() const
True if this is an instance of IntegerType.
LLVMContext & getContext() const
static Constant * AddOne(Constant *C)
Add one to a Constant.
const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
void AddInteger(signed I)
static Constant * getSExt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
This node is the base class for n'ary commutative operators.
ConstantRange zextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
Analysis pass that exposes the LoopInfo for a function.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a non-recurrence without clearing previously set flags.
This node represents multiplication of some number of SCEVs.
static const SCEV * SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, ScalarEvolution &SE)
Finds the minimum unsigned root of the following equation:
bool contains(const APInt &Val) const
Return true if the specified value is in the set.
void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
zlib-gnu style compression
This file implements a class to represent arbitrary precision integral constant values and operations...
const APInt & getAPInt() const
BlockT * getHeader() const
bool programUndefinedIfFullPoison(const Instruction *PoisonI)
Return true if this function can prove that if PoisonI is executed and yields a full-poison value (al...
static int64_t getConstant(const MachineInstr *MI)
static const SCEV * BinomialCoefficient(const SCEV *It, unsigned K, ScalarEvolution &SE, Type *ResultTy)
Compute BC(It, K). The result has width W. Assume, K > 0.
std::pair< const SCEV *, const SCEV * > SplitIntoInitAndPostInc(const Loop *L, const SCEV *S)
Splits SCEV expression S into two SCEVs.
ConstantInt * getValue() const
auto reverse(ContainerTy &&C, typename std::enable_if< has_rbegin< ContainerTy >::value >::type *=nullptr) -> decltype(make_range(C.rbegin(), C.rend()))
unsigned getActiveBits() const
Compute the number of active bits in the value.
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
static Constant * getZExt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
bool isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS...
static bool isPrivateLinkage(LinkageTypes Linkage)
A constant value that is initialized with an expression using other constant values.
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
op_range operands() const
const SCEV * getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo)
Return an expression for offsetof on the given field with type IntTy.
Type * getType() const
All values are typed, get the type of this value.
ConstantRange getSignedRange(const SCEV *S)
Determine the signed range for a particular SCEV.
This node represents a polynomial recurrence on the trip count of the specified loop.
bool implies(const SCEVPredicate *N) const override
Implementation of the SCEVPredicate interface.
const T & getValue() const LLVM_LVALUE_FUNCTION
static cl::opt< unsigned > MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden, cl::desc("Max coefficients in AddRec during evolving"), cl::init(8))
bool isInBounds() const
Test whether this is an inbounds GEP, as defined by LangRef.html.
static bool containsParameters(SmallVectorImpl< const SCEV *> &Terms)
static bool CollectAddOperandsWithScales(DenseMap< const SCEV *, APInt > &M, SmallVectorImpl< const SCEV *> &NewOps, APInt &AccumulatedConstant, const SCEV *const *Ops, size_t NumOperands, const APInt &Scale, ScalarEvolution &SE)
Process the given Ops list, which is a list of operands to be added under the given scale...
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
const SCEV * getSizeOfExpr(Type *IntTy, Type *AllocTy)
Return an expression for sizeof AllocTy that is type IntTy.
const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
ConstantRange intersectWith(const ConstantRange &CR) const
Return the range that results from the intersection of this range with another range.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
const APInt & getValue() const
Return the constant as an APInt value reference.
static cl::opt< unsigned > MaxConstantEvolvingDepth("scalar-evolution-max-constant-evolving-depth", cl::Hidden, cl::desc("Maximum depth of recursive constant evolving"), cl::init(32))
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
An instruction for storing to memory.
op_iterator op_begin() const
bool isKnownNonNegative(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Returns true if the given value is known to be non-negative.
static Constant * getUDiv(Constant *C1, Constant *C2, bool isExact=false)
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree...
const SCEV * getAddRecExpr(const SCEV *Start, const SCEV *Step, const Loop *L, SCEV::NoWrapFlags Flags)
Get an add recurrence expression for the specified loop.
Value * getOperand(unsigned i) const
Class to represent pointers.
bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
static ConstantRange makeSatisfyingICmpRegion(CmpInst::Predicate Pred, const ConstantRange &Other)
Produce the largest range such that all values in the returned range satisfy the given predicate with...
static cl::opt< unsigned > MaxIterations("max-cg-scc-iterations", cl::ReallyHidden, cl::init(4))
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
bool implies(const SCEVPredicate *N) const override
Returns true if this predicate implies N.
const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
iterator find(const_arg_type_t< KeyT > Val)
bool isNegative() const
Returns true if this value is known to be negative.
bool isLoopEntryGuardedByCond(const Loop *L, ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the loop is protected by a conditional between LHS and RHS. ...
static const SCEV * MatchNotExpr(const SCEV *Expr)
If Expr computes ~A, return A else return nullptr.
static Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
FoldingSetNodeID - This class is used to gather all the unique data bits of a node.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs ...
const SCEV * getCouldNotCompute()
Visit all nodes in the expression tree using worklist traversal.
ScalarEvolution run(Function &F, FunctionAnalysisManager &AM)
virtual bool isAlwaysTrue() const =0
Returns true if the predicate is always true.
bool isNegative() const
Determine sign of this APInt.
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return the number of times the sign bit of the register is replicated into the other bits...
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
initializer< Ty > init(const Ty &Val)
const SCEV * getOperand(unsigned i) const
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
bool erase(const KeyT &Val)
void array_pod_sort(IteratorTy Start, IteratorTy End)
array_pod_sort - This sorts an array with the specified start and end extent.
static bool isInternalLinkage(LinkageTypes Linkage)
static void GroupByComplexity(SmallVectorImpl< const SCEV *> &Ops, LoopInfo *LI, DominatorTree &DT)
Given a list of SCEV objects, order them by their complexity, and group objects of the same complexit...
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
A set of analyses that are preserved following a run of a transformation pass.
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
* if(!EatIfPresent(lltok::kw_thread_local)) return false
ParseOptionalThreadLocal := /*empty.
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt...
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
This class defines a simple visitor class that may be used for various SCEV analysis purposes...
static cl::opt< unsigned > MaxExtDepth("scalar-evolution-max-ext-depth", cl::Hidden, cl::desc("Maximum depth of recursive SExt/ZExt"), cl::init(8))
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
LLVM Basic Block Representation.
bool isLoopExiting(const BlockT *BB) const
True if terminator in the block can branch to another block that is outside of the current loop...
This class represents a binary unsigned division operation.
The instances of the Type class are immutable: once they are created, they are never changed...
LoopDisposition getLoopDisposition(const SCEV *S, const Loop *L)
Return the "disposition" of the given SCEV with respect to the given loop.
Conditional or Unconditional Branch instruction.
bool containsAddRecurrence(const SCEV *S)
Return true if the SCEV is a scAddRecExpr or it contains scAddRecExpr.
static bool IsMinConsistingOf(ScalarEvolution &SE, const SCEV *MaybeMinExpr, const SCEV *Candidate)
Is MaybeMinExpr an SMin or UMin of Candidate and some other values?
bool ult(const APInt &RHS) const
Unsigned less than comparison.
DomTreeNodeBase * getIDom() const
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This is an important base class in LLVM.
LLVM_NODISCARD bool empty() const
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator begin()
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
Value * getIncomingValueForBlock(const BasicBlock *BB) const
This file contains the declarations for the subclasses of Constant, which represent the different fla...
bool isPointerTy() const
True if this is an instance of PointerType.
static unsigned getConstantTripCount(const SCEVConstant *ExitCount)
SCEVWrapPredicate(const FoldingSetNodeIDRef ID, const SCEVAddRecExpr *AR, IncrementWrapFlags Flags)
APInt multiplicativeInverse(const APInt &modulo) const
Computes the multiplicative inverse of this APInt for a given modulo.
const SCEV * getExpr() const override
Implementation of the SCEVPredicate interface.
SCEVEqualPredicate(const FoldingSetNodeIDRef ID, const SCEV *LHS, const SCEV *RHS)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool mayHaveSideEffects() const
Return true if the instruction may have side effects.
const SCEV * getAddExpr(SmallVectorImpl< const SCEV *> &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
const SCEV * evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const
Return the value of this chain of recurrences at the specified iteration number.
void add(const SCEVPredicate *N)
Adds a predicate to this union.
void print(raw_ostream &OS, unsigned Depth) const override
Prints a textual representation of this predicate with an indentation of Depth.
static bool canConstantEvolve(Instruction *I, const Loop *L)
Determine whether this instruction can constant evolve within this loop assuming its operands can all...
const SCEV * getLHS() const
const SCEV * getGEPExpr(GEPOperator *GEP, const SmallVectorImpl< const SCEV *> &IndexExprs)
Returns an expression for a GEP.
void visitAll(const SCEV *Root)
BasicBlock * getDefaultDest() const
const SCEV * getSMaxExpr(const SCEV *LHS, const SCEV *RHS)
bool isOverflowIntrinsicNoWrap(const IntrinsicInst *II, const DominatorTree &DT)
Returns true if the arithmetic part of the II 's result is used only along the paths control dependen...
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
EquivalenceClasses - This represents a collection of equivalence classes and supports three efficient...
APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
const SCEV * getSCEVAtScope(const SCEV *S, const Loop *L)
Return a SCEV expression for the specified value at the specified scope in the program.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
Represent the analysis usage information of a pass.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly...
This instruction compares its operands according to the predicate given to the constructor.
const SCEV * getMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEVConstant that is greater than or equal to (i.e.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl...
op_iterator op_end() const
const SCEV * rewriteUsingPredicate(const SCEV *S, const Loop *L, SCEVUnionPredicate &A)
Re-writes the SCEV according to the Predicates in A.
bool SCEVExprContains(const SCEV *Root, PredTy Pred)
Return true if any node in Root satisfies the predicate Pred.
const SCEV * getPredicatedBackedgeTakenCount(const Loop *L, SCEVUnionPredicate &Predicates)
Similar to getBackedgeTakenCount, except it will add a set of SCEV predicates to Predicates that are ...
Value * getPointerOperand()
bool hasOperand(const SCEV *S, const SCEV *Op) const
Test whether the given SCEV has Op as a direct or indirect operand.
FunctionPass class - This class is used to implement most global optimizations.
ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values...
bool areAddRecsEqualWithPreds(const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const
Check if AR1 and AR2 are equal, while taking into account Equal predicates in Preds.
static Constant * getICmp(unsigned short pred, Constant *LHS, Constant *RHS, bool OnlyIfReduced=false)
get* - Return some common constants without having to specify the full Instruction::OPCODE identifier...
iterator_range< block_iterator > blocks()
const SCEV * getStart() const
Class to represent integer types.
bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type. ...
static int CompareValueComplexity(EquivalenceClasses< const Value *> &EqCacheValue, const LoopInfo *const LI, Value *LV, Value *RV, unsigned Depth)
Compare the two values LV and RV in terms of their "complexity" where "complexity" is a partial (and ...
bool isKnownNegative(const SCEV *S)
Test if the given expression is known to be negative.
bool isIntN(unsigned N) const
Check if this APInt has an N-bit unsigned integer value.
static Constant * getNot(Constant *C)
const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may effect its v...
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a recurrence without clearing any previously set flags.
Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, const DataLayout &DL)
ConstantFoldLoadFromConstPtr - Return the value that a load from C would produce if it is constant an...
auto remove_if(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range))
Provide wrappers to std::remove_if which take ranges instead of having to pass begin/end explicitly...
const Value * getCondition() const
static Constant * getAllOnesValue(Type *Ty)
void print(raw_ostream &OS, unsigned Depth) const
Print the SCEV mappings done by the Predicated Scalar Evolution.
void getExitingBlocks(SmallVectorImpl< BlockT *> &ExitingBlocks) const
Return all blocks inside the loop that have successors outside of the loop.
bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
const SCEV * getUMinFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS)
Promote the operands to the wider of the types using zero-extension, and then perform a umin operatio...
static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S, BasicBlock *BB)
iterator erase(const_iterator CI)
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow)
static SCEV::NoWrapFlags StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, const SmallVectorImpl< const SCEV *> &Ops, SCEV::NoWrapFlags Flags)
Constant * ConstantFoldLoadThroughGEPIndices(Constant *C, ArrayRef< Constant *> Indices)
ConstantFoldLoadThroughGEPIndices - Given a constant and getelementptr indices (with an implied zero ...
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
auto find(R &&Range, const T &Val) -> decltype(adl_begin(Range))
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly...
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
static bool containsConstantInAddMulChain(const SCEV *StartExpr)
Determine if any of the operands in this SCEV are a constant or if any of the add or multiply express...
const SCEV * getNoopOrZeroExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldCompareInstOperands - Attempt to constant fold a compare instruction (icmp/fcmp) with the...
const SCEV * getMulExpr(SmallVectorImpl< const SCEV *> &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
INITIALIZE_PASS_END(RegBankSelect, DEBUG_TYPE, "Assign register bank of generic virtual registers", false, false) RegBankSelect
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
ConstantRange subtract(const APInt &CI) const
Subtract the specified constant from the endpoints of this constant range.
const SCEV * getUDivExactExpr(const SCEV *LHS, const SCEV *RHS)
Get a canonical unsigned division expression, or something simpler if possible.
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
The SCEV is loop-variant (unknown).
This class represents an assumption made using SCEV expressions which can be checked at run-time...
void sort(IteratorTy Start, IteratorTy End)
static cl::opt< unsigned > MaxSCEVOperationsImplicationDepth("scalar-evolution-max-scev-operations-implication-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV operations implication analysis"), cl::init(2))
static cl::opt< unsigned > MaxSCEVCompareDepth("scalar-evolution-max-scev-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV complexity comparisons"), cl::init(32))
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
const SCEV * getSMinExpr(const SCEV *LHS, const SCEV *RHS)
void print(raw_ostream &OS) const
Print out the internal representation of this scalar to the specified stream.
bool isEmptySet() const
Return true if this set contains no members.
A function analysis which provides an AssumptionCache.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
static int numberOfTerms(const SCEV *S)
BlockT * getLoopPredecessor() const
If the given loop's header has exactly one unique predecessor outside the loop, return it...
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
static cl::opt< unsigned > MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden, cl::desc("Maximum depth of recursive arithmetics"), cl::init(32))
unsigned getSCEVType() const
bool isNonConstantNegative() const
Return true if the specified scev is negated, but not a constant.
unsigned getNumOperands() const
static PHINode * getConstantEvolvingPHI(Value *V, const Loop *L)
getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node in the loop that V is deri...
static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2)
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
bool isMaxSignedValue() const
Determine if this is the largest signed value.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the generic address space (address sp...
This is the shared class of boolean and integer constants.
bool isAlwaysTrue() const override
Implementation of the SCEVPredicate interface.
bool isAllOnesValue() const
Return true if the expression is a constant all-ones value.
const SCEVUnionPredicate & getUnionPredicate() const
bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
Type * getType() const
Return the LLVM type of this SCEV expression.
void initializeScalarEvolutionWrapperPassPass(PassRegistry &)
const SCEV * getTruncateExpr(const SCEV *Op, Type *Ty)
static LLVM_NODISCARD SCEVWrapPredicate::IncrementWrapFlags clearFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OffFlags)
Convenient IncrementWrapFlags manipulation methods.
const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
bool hasNoSelfWrap() const
const SCEV *const * op_iterator
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
bool dominates(const Instruction *Def, const Use &U) const
Return true if Def dominates a use in User.
This is a utility class that provides an abstraction for the common functionality between Instruction...
Provides information about what library functions are available for the current target.
The SCEV dominates the block.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
This class represents a range of values.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_NODISCARD T pop_back_val()
static LLVM_NODISCARD SCEVWrapPredicate::IncrementWrapFlags getImpliedFlags(const SCEVAddRecExpr *AR, ScalarEvolution &SE)
Returns the set of SCEVWrapPredicate no wrap flags implied by a SCEVAddRecExpr.
CHAIN = SC CHAIN, Imm128 - System call.
bool isMaxValue() const
Determine if this is the largest unsigned value.
ScalarEvolution(Function &F, TargetLibraryInfo &TLI, AssumptionCache &AC, DominatorTree &DT, LoopInfo &LI)
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
static const Loop * isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI)
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
bool isConditional() const
iterator_range< detail::value_sequence_iterator< ValueT > > seq(ValueT Begin, ValueT End)
pred_range predecessors(BasicBlock *BB)
Optional< std::pair< const SCEV *, SmallVector< const SCEVPredicate *, 3 > > > createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI)
Checks if SymbolicPHI can be rewritten as an AddRecExpr under some Predicates.
void releaseMemory() override
releaseMemory() - This member can be implemented by a pass if it wants to be able to release its memo...
const SCEV * getUMaxExpr(const SCEV *LHS, const SCEV *RHS)
unsigned getNumIncomingValues() const
Return the number of incoming edges.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void print(raw_ostream &OS, const Module *=nullptr) const override
print - Print out the internal state of the pass.
The access may modify the value stored in memory.
scalar Scalar Evolution Analysis
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static Constant * getOtherIncomingValue(PHINode *PN, BasicBlock *BB)
bool isTrueWhenEqual() const
This is just a convenience.
LoopDisposition
An enum describing the relationship between a SCEV and a loop.
Class for arbitrary precision integers.
This node represents an addition of some number of SCEVs.
bool isFalseWhenEqual() const
This is just a convenience.
Type * getWiderType(Type *Ty1, Type *Ty2) const
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to be non-zero when defined.
void setPreservesAll()
Set by analyses that do not transform their input at all.
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
This class represents a signed maximum selection.
iterator_range< user_iterator > users()
static const SCEV * getUnsignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
static Constant * getCast(unsigned ops, Constant *C, Type *Ty, bool OnlyIfReduced=false)
Convenience function for getting a Cast operation.
static const SCEV * getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
void visitAll(const SCEV *Root, SV &Visitor)
Use SCEVTraversal to visit all nodes in the given expression tree.
static void clear(coro::Shape &Shape)
iterator insert(iterator I, T &&Elt)
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
const Value * getFalseValue() const
A utility class that uses RAII to save and restore the value of a variable.
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
static cl::opt< unsigned > MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden, cl::desc("Maximum number of iterations SCEV will " "symbolically execute a constant " "derived loop"), cl::init(100))
SCEVPredicate(const SCEVPredicate &)=default
amdgpu Simplify well known AMD library false Value Value * Arg
ConstantRange smax(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a signed maximum of a value in thi...
static Constant * BuildConstantFromSCEV(const SCEV *V)
This builds up a Constant using the ConstantExpr interface.
static Optional< APInt > SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, const ConstantRange &Range, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n iterations.
Analysis pass that exposes the ScalarEvolution for a function.
ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
This class represents a zero extension of a small integer value to a larger integer value...
LoopT * getParentLoop() const
Virtual Register Rewriter
static Constant * getNeg(Constant *C, bool HasNUW=false, bool HasNSW=false)
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
Predicate getPredicate() const
Return the predicate for this instruction.
const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator end()
The SCEV does not dominate the block.
FoldingSetNodeIDRef - This class describes a reference to an interned FoldingSetNodeID, which can be useful to store node id data rather than using plain FoldingSetNodeIDs, since the 32-element SmallVector is often much larger than necessary, and the possibility of heap allocation means it requires a non-trivial destructor call.
void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may affect Scalar...
pointer data()
Return a pointer to the vector's buffer, even if empty().
void emplace_back(ArgTypes &&... Args)
This class represents an analyzed expression in the program.
static IntegerType * getInt32Ty(LLVMContext &C)
bool hasNoUnsignedWrap() const
bool isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Returns true if the given value is known be negative (i.e.
LLVM_NODISCARD bool empty() const
unsigned greater or equal
Represents a single loop in the control flow graph.
ArrayRef< BlockT * > getBlocks() const
Get a list of the basic blocks which make up this loop.
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
const Function * getParent() const
Return the enclosing method, or null if none.
void findArrayDimensions(SmallVectorImpl< const SCEV *> &Terms, SmallVectorImpl< const SCEV *> &Sizes, const SCEV *ElementSize)
Compute the array dimensions Sizes from the set of Terms extracted from the memory access function of...
const SCEVAddRecExpr * convertSCEVToAddRecWithPredicates(const SCEV *S, const Loop *L, SmallPtrSetImpl< const SCEVPredicate *> &Preds)
Tries to convert the S expression to an AddRec expression, adding additional predicates to Preds as r...
bool isEquality() const
Return true if this predicate is either EQ or NE.
static Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
const APInt & getLower() const
Return the lower value for this range.
Optional< APInt > SolveQuadraticEquationWrap(APInt A, APInt B, APInt C, unsigned RangeWidth)
Let q(n) = An^2 + Bn + C, and BW = bit width of the value range (e.g.
static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V)
Check whether value has nuw/nsw/exact set but SCEV does not.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static void PushLoopPHIs(const Loop *L, SmallVectorImpl< Instruction *> &Worklist)
Push PHI nodes in the header of the given loop onto the given Worklist.
bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
This class represents a sign extension of a small integer value to a larger integer value...
BlockDisposition
An enum describing the relationship between a SCEV and a basic block.
bool isKnownNonZero(const SCEV *S)
Test if the given expression is known to be non-zero.
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
This class represents an unsigned maximum selection.
static LLVM_NODISCARD SCEV::NoWrapFlags maskFlags(SCEV::NoWrapFlags Flags, int Mask)
Convenient NoWrapFlags manipulation that hides enum casts and is visible in the ScalarEvolution name ...
static int CompareSCEVComplexity(EquivalenceClasses< const SCEV *> &EqCacheSCEV, EquivalenceClasses< const Value *> &EqCacheValue, const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS, DominatorTree &DT, unsigned Depth=0)
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
static bool findArrayDimensionsRec(ScalarEvolution &SE, SmallVectorImpl< const SCEV *> &Terms, SmallVectorImpl< const SCEV *> &Sizes)
Constant * ConstantFoldInstOperands(Instruction *I, ArrayRef< Constant *> Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands...
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
const SCEV * getURemExpr(const SCEV *LHS, const SCEV *RHS)
Represents an unsigned remainder expression based on unsigned division.
AnalysisUsage & addRequiredTransitive()
static const SCEV * getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
bool isGuaranteedToExecuteForEveryIteration(const Instruction *I, const Loop *L)
Return true if this function can prove that the instruction I is executed for every iteration of the ...
bool isUnconditional() const
bool hasNoUnsignedWrap() const
Determine whether the no unsigned wrap flag is set.
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
void print(raw_ostream &OS) const
const SCEV * getRHS() const
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
OutputIt transform(R &&Range, OutputIt d_first, UnaryPredicate P)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere...
bool isMinValue() const
Determine if this is the smallest unsigned value.
static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD)
API to communicate dependencies between analyses during invalidation.
APInt umul_ov(const APInt &RHS, bool &Overflow) const
ConstantRange sextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
Analysis pass providing the TargetLibraryInfo.
void computeAccessFunctions(const SCEV *Expr, SmallVectorImpl< const SCEV *> &Subscripts, SmallVectorImpl< const SCEV *> &Sizes)
Return in Subscripts the access functions for each dimension in Sizes (third step of delinearization)...
iterator_range< df_iterator< T > > depth_first(const T &G)
ConstantRange getUnsignedRange(const SCEV *S)
Determine the unsigned range for a particular SCEV.
const SCEV * getBackedgeTakenCount(const Loop *L)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
static ConstantRange makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp, const ConstantRange &Other, unsigned NoWrapKind)
Return the largest range containing all X such that "X BinOpC Y" is guaranteed not to wrap (overflow)...
bool SimplifyICmpOperands(ICmpInst::Predicate &Pred, const SCEV *&LHS, const SCEV *&RHS, unsigned Depth=0)
Simplify LHS and RHS in a comparison with predicate Pred.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This templated class represents "all analyses that operate over <a particular IR unit>" (e...
static cl::opt< unsigned > AddOpsInlineThreshold("scev-addops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining addition operands into a SCEV"), cl::init(500))
user_iterator user_begin()
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
This class represents a composition of other SCEV predicates, and is the class that most clients will...
static cl::opt< bool > VerifyIR("scev-verify-ir", cl::Hidden, cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"), cl::init(false))
bool isOne() const
Return true if the expression is a constant one.
const SCEV * getUMinExpr(const SCEV *LHS, const SCEV *RHS)
static Optional< BinaryOp > MatchBinaryOp(Value *V, DominatorTree &DT)
Try to map V into a BinaryOp, and return None on failure.
static Optional< APInt > TruncIfPossible(Optional< APInt > X, unsigned BitWidth)
Helper function to truncate an optional APInt to a given BitWidth.
const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
Module * getParent()
Get the module that this global value is contained inside of...
LLVM Value Representation.
ConstantRange multiply(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a multiplication of a value in thi...
const SCEV * getExpr() const override
Returns the SCEV to which this predicate applies, or nullptr if this is a SCEVUnionPredicate.
const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
A vector that has set insertion semantics.
ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
void eraseValueFromMap(Value *V)
Erase Value from ValueExprMap and ExprValueMap.
static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, const Loop *L)
succ_range successors(Instruction *I)
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
bool canConstantFoldCallTo(ImmutableCallSite CS, const Function *F)
canConstantFoldCallTo - Return true if it's even possible to fold a call to the specified function...
bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
This file provides utility classes that use RAII to save and restore values.
static cl::opt< unsigned > MaxValueCompareDepth("scalar-evolution-max-value-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive value complexity comparisons"), cl::init(2))
This class implements an extremely fast bulk output stream that can only output to a stream...
bool implies(const SCEVPredicate *N) const override
Returns true if this predicate implies N.
static Type * isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, bool &Signed, ScalarEvolution &SE)
Helper function to createAddRecFromPHIWithCasts.
bool isBackedgeTakenCountMaxOrZero(const Loop *L)
Return true if the backedge taken count is either the value returned by getMaxBackedgeTakenCount or z...
const SCEV * getUDivExpr(const SCEV *LHS, const SCEV *RHS)
Get a canonical unsigned division expression, or something simpler if possible.
const SCEV * getUnknown(Value *V)
The SCEV varies predictably with the loop.
static const SCEV * rewrite(const SCEV *Scev, ScalarEvolution &SE, ValueToValueMap &Map, bool InterpretConsts=false)
The legacy pass manager's analysis pass to compute loop information.
bool dominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV dominate the specified basic block...
Value handle with callbacks on RAUW and destruction.
static int sizeOfSCEV(const SCEV *S)
StringRef - Represent a constant reference to a string, i.e.
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Type * getSourceElementType() const
inst_range instructions(Function *F)
bool isNonNegative() const
Returns true if this value is known to be non-negative.
A container for analyses that lazily runs them and caches their results.
const SCEV * getExitCount(const Loop *L, BasicBlock *ExitingBlock)
Get the expression for the number of loop iterations for which this loop is guaranteed not to exit vi...
static Optional< bool > isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS, const Value *ARHS, const Value *BLHS, const Value *BRHS, const DataLayout &DL, unsigned Depth)
Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred ALHS ARHS" is true.
Legacy analysis pass which computes a DominatorTree.
unsigned countLeadingZeros() const
The APInt version of the countLeadingZeros functions in MathExtras.h.
BlockDisposition getBlockDisposition(const SCEV *S, const BasicBlock *BB)
Return the "disposition" of the given SCEV with respect to the given block.
const SCEV * getTruncateOrZeroExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
static bool IsMaxConsistingOf(const SCEV *MaybeMaxExpr, const SCEV *Candidate)
Is MaybeMaxExpr an SMax or UMax of Candidate and some other values?
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
const SCEV * getElementSize(Instruction *Inst)
Return the size of an element read or written by Inst.
static bool CanConstantFold(const Instruction *I)
Return true if we can constant fold an instruction of the specified type, assuming that all operands ...
bool isLoopInvariantPredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS, const SCEV *&InvariantRHS)
Return true if the result of the predicate LHS Pred RHS is loop invariant with respect to L...
static bool containsUndefs(const SCEV *S)
bool hasComputableLoopEvolution(const SCEV *S, const Loop *L)
Return true if the given SCEV changes value in a known way in the specified loop. ...
This node is a base class providing common functionality for n'ary operators.
static Constant * getMul(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
bool isEquivalent(const ElemTy &V1, const ElemTy &V2) const
const SCEV * getNoopOrAnyExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
This class represents an assumption made on an AddRec expression.
PredicatedScalarEvolution(ScalarEvolution &SE, Loop &L)
NoWrapFlags
NoWrapFlags are bitfield indices into SubclassData.
ValTy * getReturnedArgOperand() const
static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
static cl::opt< unsigned > MulOpsInlineThreshold("scev-mulops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining multiplication operands into a SCEV"), cl::init(32))
Value * SimplifyInstruction(Instruction *I, const SimplifyQuery &Q, OptimizationRemarkEmitter *ORE=nullptr)
See if we can compute a simplified version of this instruction.
BlockT * getExitingBlock() const
If getExitingBlocks would return exactly one block, return that block.
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
const SCEV * getNotSCEV(const SCEV *V)
Return the SCEV object corresponding to ~V.
bool hasLoopInvariantBackedgeTakenCount(const Loop *L)
Return true if the specified loop has an analyzable loop-invariant backedge-taken count...
const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
This class represents an assumption that two SCEV expressions are equal, and this can be checked at r...
A special type used by analysis passes to provide an address that identifies that particular analysis...
APInt sextOrSelf(unsigned width) const
Sign extend or truncate to width.
static bool classof(const SCEV *S)
Methods for support type inquiry through isa, cast, and dyn_cast:
IncrementWrapFlags getFlags() const
Returns the set assumed no overflow flags.
bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
This visitor recursively visits a SCEV expression and re-writes it.
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool isNullValue() const
Determine if all bits are clear.
bool isStructTy() const
True if this is an instance of StructType.
const SCEV * getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
bool isArrayTy() const
True if this is an instance of ArrayType.
const BasicBlock * getParent() const
static ConstantRange getRangeForAffineARHelper(APInt Step, const ConstantRange &StartRange, const APInt &MaxBECount, unsigned BitWidth, bool Signed)
This class represents a constant integer value.
const SCEV * getAnyExtendExpr(const SCEV *Op, Type *Ty)
getAnyExtendExpr - Return a SCEV for the given operand extended with unspecified bits out to the give...
void forgetTopmostLoop(const Loop *L)
static PHINode * getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, DenseMap< Instruction *, PHINode *> &PHIMap, unsigned Depth)
getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by recursing through each instructi...
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
Is LHS Pred RHS true on the virtue of LHS or RHS being a Min or Max expression?