56 if (U->getType() == Ty)
57 if (
CastInst *CI = dyn_cast<CastInst>(U))
58 if (CI->getOpcode() ==
Op) {
68 CI->replaceAllUsesWith(Ret);
83 assert(SE.DT.dominates(Ret, &*BIP));
85 rememberInstruction(Ret);
92 if (
auto *II = dyn_cast<InvokeInst>(I))
93 IP = II->getNormalDest()->begin();
95 while (isa<PHINode>(IP))
98 if (isa<FuncletPadInst>(IP) || isa<LandingPadInst>(IP)) {
100 }
else if (isa<CatchSwitchInst>(IP)) {
103 assert(!IP->isEHPad() &&
"unexpected eh pad!");
114 assert((Op == Instruction::BitCast ||
115 Op == Instruction::PtrToInt ||
116 Op == Instruction::IntToPtr) &&
117 "InsertNoopCastOfTo cannot perform non-noop casts!");
118 assert(SE.getTypeSizeInBits(V->
getType()) == SE.getTypeSizeInBits(Ty) &&
119 "InsertNoopCastOfTo cannot change sizes!");
122 if (Op == Instruction::BitCast) {
125 if (
CastInst *CI = dyn_cast<CastInst>(V)) {
126 if (CI->getOperand(0)->getType() == Ty)
127 return CI->getOperand(0);
131 if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
132 SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->
getType())) {
133 if (
CastInst *CI = dyn_cast<CastInst>(V))
134 if ((CI->getOpcode() == Instruction::PtrToInt ||
135 CI->getOpcode() == Instruction::IntToPtr) &&
136 SE.getTypeSizeInBits(CI->getType()) ==
137 SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
138 return CI->getOperand(0);
140 if ((CE->getOpcode() == Instruction::PtrToInt ||
141 CE->getOpcode() == Instruction::IntToPtr) &&
142 SE.getTypeSizeInBits(CE->getType()) ==
143 SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
144 return CE->getOperand(0);
153 if (
Argument *A = dyn_cast<Argument>(V)) {
155 while ((isa<BitCastInst>(IP) &&
156 isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
157 cast<BitCastInst>(IP)->getOperand(0) != A) ||
158 isa<DbgInfoIntrinsic>(IP))
160 return ReuseOrCreateCast(A, Ty, Op, IP);
166 return ReuseOrCreateCast(I, Ty, Op, IP);
174 if (
Constant *CLHS = dyn_cast<Constant>(LHS))
175 if (
Constant *CRHS = dyn_cast<Constant>(RHS))
179 unsigned ScanLimit = 6;
183 if (IP != BlockBegin) {
185 for (; ScanLimit; --IP, --ScanLimit) {
188 if (isa<DbgInfoIntrinsic>(IP))
196 if (isa<OverflowingBinaryOperator>(
I) &&
197 (
I->hasNoSignedWrap() ||
I->hasNoUnsignedWrap()))
199 if (isa<PossiblyExactOperator>(
I) &&
I->isExact())
203 if (IP->getOpcode() == (
unsigned)Opcode && IP->getOperand(0) == LHS &&
204 IP->getOperand(1) == RHS && !canGeneratePoison(&*IP))
206 if (IP == BlockBegin)
break;
211 DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
212 SCEVInsertPointGuard Guard(Builder,
this);
215 while (
const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
216 if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS))
break;
217 BasicBlock *Preheader = L->getLoopPreheader();
218 if (!Preheader)
break;
225 Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
227 rememberInstruction(BO);
276 if (
const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
280 if (
const SCEVConstant *
C = dyn_cast<SCEVConstant>(M->getOperand(0)))
281 if (!
C->getAPInt().srem(FC->
getAPInt())) {
291 const SCEV *Step = A->getStepRecurrence(SE);
297 const SCEV *Start = A->getStart();
315 unsigned NumAddRecs = 0;
316 for (
unsigned i = Ops.
size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
322 const SCEV *Sum = NoAddRecs.empty() ?
330 else if (!Sum->isZero())
333 Ops.
append(AddRecs.begin(), AddRecs.end());
346 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i)
347 while (
const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
348 const SCEV *Start = A->getStart();
349 if (Start->
isZero())
break;
352 A->getStepRecurrence(SE),
358 e +=
Add->getNumOperands();
363 if (!AddRecs.
empty()) {
398 Value *SCEVExpander::expandAddToGEP(
const SCEV *
const *op_begin,
399 const SCEV *
const *op_end,
404 Type *ElTy = OriginalElTy;
407 bool AnyNonZeroIndices =
false;
413 Type *IntPtrTy = DL.getIntPtrType(PTy);
425 const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy);
428 for (
const SCEV *Op : Ops) {
429 const SCEV *Remainder = SE.getConstant(Ty, 0);
435 AnyNonZeroIndices =
true;
443 if (!ScaledOps.
empty()) {
456 expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
460 while (
StructType *STy = dyn_cast<StructType>(ElTy)) {
461 bool FoundFieldNo =
false;
463 if (STy->getNumElements() == 0)
break;
469 if (SE.getTypeSizeInBits(
C->
getType()) <= 64) {
471 uint64_t FullOffset =
C->getValue()->getZExtValue();
476 ElTy = STy->getTypeAtIndex(ElIdx);
479 AnyNonZeroIndices =
true;
487 ElTy = STy->getTypeAtIndex(0u);
493 if (
ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
494 ElTy = ATy->getElementType();
502 if (!AnyNonZeroIndices) {
504 V = InsertNoopCastOfTo(V,
507 assert(!isa<Instruction>(V) ||
508 SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));
511 Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);
514 if (
Constant *CLHS = dyn_cast<Constant>(V))
515 if (
Constant *CRHS = dyn_cast<Constant>(Idx))
520 unsigned ScanLimit = 6;
524 if (IP != BlockBegin) {
526 for (; ScanLimit; --IP, --ScanLimit) {
529 if (isa<DbgInfoIntrinsic>(IP))
531 if (IP->getOpcode() == Instruction::GetElementPtr &&
532 IP->getOperand(0) == V && IP->getOperand(1) == Idx)
534 if (IP == BlockBegin)
break;
539 SCEVInsertPointGuard Guard(Builder,
this);
542 while (
const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
543 if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx))
break;
544 BasicBlock *Preheader = L->getLoopPreheader();
545 if (!Preheader)
break;
552 Value *
GEP = Builder.CreateGEP(Builder.getInt8Ty(), V, Idx,
"uglygep");
553 rememberInstruction(GEP);
559 SCEVInsertPointGuard Guard(Builder,
this);
562 while (
const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
563 if (!L->isLoopInvariant(V))
break;
565 bool AnyIndexNotLoopInvariant =
any_of(
566 GepIndices, [L](
Value *Op) {
return !L->isLoopInvariant(Op); });
568 if (AnyIndexNotLoopInvariant)
571 BasicBlock *Preheader = L->getLoopPreheader();
572 if (!Preheader)
break;
583 Casted = InsertNoopCastOfTo(Casted, PTy);
584 Value *
GEP = Builder.CreateGEP(OriginalElTy, Casted, GepIndices,
"scevgep");
586 rememberInstruction(GEP);
589 return expand(SE.getAddExpr(Ops));
594 const SCEV *
const Ops[1] = {Op};
595 return expandAddToGEP(Ops, Ops + 1, PTy, Ty, V);
614 const Loop *SCEVExpander::getRelevantLoop(
const SCEV *S) {
616 auto Pair = RelevantLoops.insert(std::make_pair(S,
nullptr));
618 return Pair.first->second;
620 if (isa<SCEVConstant>(S))
623 if (
const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
624 if (
const Instruction *
I = dyn_cast<Instruction>(U->getValue()))
625 return Pair.first->second = SE.LI.getLoopFor(
I->getParent());
630 const Loop *L =
nullptr;
633 for (
const SCEV *Op :
N->operands())
635 return RelevantLoops[
N] = L;
639 return RelevantLoops[
C] = Result;
643 getRelevantLoop(
D->getLHS()), getRelevantLoop(
D->getRHS()), SE.DT);
644 return RelevantLoops[
D] = Result;
657 bool operator()(std::pair<const Loop *, const SCEV *> LHS,
658 std::pair<const Loop *, const SCEV *> RHS)
const {
660 if (LHS.second->getType()->isPointerTy() !=
661 RHS.second->getType()->isPointerTy())
662 return LHS.second->getType()->isPointerTy();
665 if (LHS.first != RHS.first)
671 if (LHS.second->isNonConstantNegative()) {
672 if (!RHS.second->isNonConstantNegative())
674 }
else if (RHS.second->isNonConstantNegative())
692 for (std::reverse_iterator<SCEVAddExpr::op_iterator>
I(S->
op_end()),
694 OpsAndLoops.
push_back(std::make_pair(getRelevantLoop(*
I), *
I));
698 std::stable_sort(OpsAndLoops.
begin(), OpsAndLoops.
end(), LoopCompare(SE.DT));
702 Value *Sum =
nullptr;
703 for (
auto I = OpsAndLoops.
begin(),
E = OpsAndLoops.
end();
I !=
E;) {
704 const Loop *CurLoop =
I->first;
705 const SCEV *Op =
I->second;
710 }
else if (
PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
714 for (;
I !=
E &&
I->first == CurLoop; ++
I) {
717 const SCEV *
X =
I->second;
718 if (
const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
719 if (!isa<Instruction>(U->getValue()))
720 X = SE.getSCEV(U->getValue());
723 Sum = expandAddToGEP(NewOps.
begin(), NewOps.
end(), PTy, Ty, Sum);
729 NewOps.
push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
731 for (++
I;
I !=
E &&
I->first == CurLoop; ++
I)
733 Sum = expandAddToGEP(NewOps.
begin(), NewOps.
end(), PTy, Ty,
expand(Op));
736 Value *
W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
737 Sum = InsertNoopCastOfTo(Sum, Ty);
738 Sum = InsertBinop(Instruction::Sub, Sum, W);
742 Value *
W = expandCodeFor(Op, Ty);
743 Sum = InsertNoopCastOfTo(Sum, Ty);
745 if (isa<Constant>(Sum))
std::swap(Sum, W);
760 for (std::reverse_iterator<SCEVMulExpr::op_iterator>
I(S->
op_end()),
762 OpsAndLoops.
push_back(std::make_pair(getRelevantLoop(*
I), *
I));
765 std::stable_sort(OpsAndLoops.
begin(), OpsAndLoops.
end(), LoopCompare(SE.DT));
769 Value *Prod =
nullptr;
770 auto I = OpsAndLoops.
begin();
775 const auto ExpandOpBinPowN = [
this, &
I, &OpsAndLoops, &Ty]() {
785 while (
E != OpsAndLoops.end() && *
I == *
E && Exponent != MaxExponent) {
789 assert(Exponent > 0 &&
"Trying to calculate a zeroth exponent of operand?");
793 Value *
P = expandCodeFor(
I->second, Ty);
794 Value *Result =
nullptr;
797 for (uint64_t BinExp = 2; BinExp <= Exponent; BinExp <<= 1) {
798 P = InsertBinop(Instruction::Mul, P, P);
799 if (Exponent & BinExp)
800 Result = Result ? InsertBinop(Instruction::Mul, Result, P) :
P;
804 assert(Result &&
"Nothing was expanded?");
808 while (
I != OpsAndLoops.end()) {
811 Prod = ExpandOpBinPowN();
812 }
else if (
I->second->isAllOnesValue()) {
814 Prod = InsertNoopCastOfTo(Prod, Ty);
819 Value *
W = ExpandOpBinPowN();
820 Prod = InsertNoopCastOfTo(Prod, Ty);
822 if (isa<Constant>(Prod))
std::swap(Prod, W);
827 Prod = InsertBinop(Instruction::Shl, Prod,
830 Prod = InsertBinop(Instruction::Mul, Prod, W);
843 const APInt &RHS =
SC->getAPInt();
845 return InsertBinop(Instruction::LShr, LHS,
850 return InsertBinop(Instruction::UDiv, LHS, RHS);
858 while (
const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
859 Base = A->getStart();
862 A->getStepRecurrence(SE),
866 if (
const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
867 Base = A->getOperand(A->getNumOperands()-1);
869 NewAddOps.
back() = Rest;
880 (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
885 if (L == IVIncInsertLoop) {
887 OE = IncV->
op_end(); OI != OE; ++OI)
888 if (
Instruction *OInst = dyn_cast<Instruction>(OI))
889 if (!SE.DT.dominates(OInst, IVIncInsertPos))
903 return isNormalAddRecExprPHI(PN, IncV, L);
918 if (IncV == InsertPos)
926 case Instruction::Sub: {
928 if (!OInst || SE.DT.dominates(OInst, InsertPos))
929 return dyn_cast<Instruction>(IncV->
getOperand(0));
932 case Instruction::BitCast:
934 case Instruction::GetElementPtr:
936 if (isa<Constant>(*
I))
939 if (!SE.DT.dominates(OInst, InsertPos))
952 unsigned AS = cast<PointerType>(IncV->
getType())->getAddressSpace();
972 if (Builder.GetInsertPoint() == It)
973 Builder.SetInsertPoint(&*NewInsertPt);
974 for (
auto *InsertPtGuard : InsertPointGuards)
975 if (InsertPtGuard->GetInsertPoint() == It)
976 InsertPtGuard->SetInsertPoint(NewInsertPt);
983 if (SE.DT.dominates(IncV, InsertPos))
988 if (isa<PHINode>(InsertPos) ||
992 if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
998 Instruction *Oper = getIVIncOperand(IncV, InsertPos,
true);
1004 if (SE.DT.dominates(IncV, InsertPos))
1007 for (
auto I = IVIncs.
rbegin(),
E = IVIncs.
rend(); I !=
E; ++
I) {
1008 fixupInsertPoints(*I);
1009 (*I)->moveBefore(InsertPos);
1039 PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
1042 if (!isa<ConstantInt>(StepV))
1045 IncV = expandAddToGEP(SE.getSCEV(StepV), GEPPtrTy, IntTy, PN);
1047 IncV = Builder.CreateBitCast(IncV, PN->
getType());
1048 rememberInstruction(IncV);
1051 IncV = useSubtract ?
1052 Builder.CreateSub(PN, StepV,
Twine(IVName) +
".iv.next") :
1053 Builder.CreateAdd(PN, StepV,
Twine(IVName) +
".iv.next");
1054 rememberInstruction(IncV);
1068 fixupInsertPoints(InstToHoist);
1071 InstToHoist = cast<Instruction>(InstToHoist->
getOperand(0));
1072 }
while (InstToHoist != LoopPhi);
1093 if (Phi == Requested) {
1109 if (!isa<IntegerType>(AR->
getType()))
1117 const SCEV *ExtendAfterOp =
1119 return ExtendAfterOp == OpAfterExtend;
1123 if (!isa<IntegerType>(AR->
getType()))
1131 const SCEV *ExtendAfterOp =
1133 return ExtendAfterOp == OpAfterExtend;
1140 SCEVExpander::getAddRecExprPHILiterally(
const SCEVAddRecExpr *Normalized,
1146 assert((!IVIncInsertLoop||IVIncInsertPos) &&
"Uninitialized insert position");
1151 PHINode *AddRecPhiMatch =
nullptr;
1158 bool TryNonMatchingSCEV =
1160 SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());
1163 if (!SE.isSCEVable(PN.
getType()))
1170 bool IsMatchingSCEV = PhiSCEV == Normalized;
1174 if (!IsMatchingSCEV && !TryNonMatchingSCEV)
1185 if (!isExpandedAddRecExprPHI(&PN, TempIncV, L))
1187 if (L == IVIncInsertLoop && !hoistIVInc(TempIncV, IVIncInsertPos))
1190 if (!isNormalAddRecExprPHI(&PN, TempIncV, L))
1195 if (IsMatchingSCEV) {
1199 AddRecPhiMatch = &PN;
1205 if ((!TruncTy || InvertStep) &&
1209 AddRecPhiMatch = &PN;
1211 TruncTy = SE.getEffectiveSCEVType(Normalized->
getType());
1215 if (AddRecPhiMatch) {
1218 if (L == IVIncInsertLoop)
1219 hoistBeforePos(&SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);
1223 InsertedValues.insert(AddRecPhiMatch);
1225 rememberInstruction(IncV);
1226 return AddRecPhiMatch;
1231 SCEVInsertPointGuard Guard(Builder,
this);
1241 PostIncLoops.
clear();
1245 "Can't expand add recurrences without a loop preheader!");
1246 Value *StartV = expandCodeFor(Normalized->
getStart(), ExpandTy,
1251 assert(!isa<Instruction>(StartV) ||
1252 SE.DT.properlyDominates(cast<Instruction>(StartV)->getParent(),
1263 Step = SE.getNegativeSCEV(Step);
1270 bool IncrementIsNUW = !useSubtract &&
IsIncrementNUW(SE, Normalized);
1271 bool IncrementIsNSW = !useSubtract &&
IsIncrementNSW(SE, Normalized);
1275 Builder.SetInsertPoint(Header, Header->
begin());
1277 PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
1278 Twine(IVName) +
".iv");
1279 rememberInstruction(PN);
1296 Builder.SetInsertPoint(InsertPos);
1297 Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
1299 if (isa<OverflowingBinaryOperator>(IncV)) {
1301 cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
1303 cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
1310 PostIncLoops = SavedPostIncLoops;
1313 InsertedValues.
insert(PN);
1320 Type *IntTy = SE.getEffectiveSCEVType(STy);
1326 if (PostIncLoops.count(L)) {
1334 const SCEV *PostLoopOffset =
nullptr;
1335 if (!SE.properlyDominates(Start, L->
getHeader())) {
1336 PostLoopOffset = Start;
1337 Start = SE.getConstant(Normalized->
getType(), 0);
1338 Normalized = cast<SCEVAddRecExpr>(
1346 const SCEV *PostLoopScale =
nullptr;
1347 if (!SE.dominates(Step, L->
getHeader())) {
1348 PostLoopScale = Step;
1349 Step = SE.getConstant(Normalized->
getType(), 1);
1350 if (!Start->isZero()) {
1353 assert(!PostLoopOffset &&
"Start not-null but PostLoopOffset set?");
1354 PostLoopOffset = Start;
1355 Start = SE.getConstant(Normalized->
getType(), 0);
1358 cast<SCEVAddRecExpr>(SE.getAddRecExpr(
1359 Start, Step, Normalized->
getLoop(),
1365 Type *ExpandTy = PostLoopScale ? IntTy : STy;
1368 Type *AddRecPHIExpandTy =
1369 DL.isNonIntegralPointerType(STy) ? Normalized->
getType() : ExpandTy;
1373 Type *TruncTy =
nullptr;
1374 bool InvertStep =
false;
1375 PHINode *PN = getAddRecExprPHILiterally(Normalized, L, AddRecPHIExpandTy,
1376 IntTy, TruncTy, InvertStep);
1380 if (!PostIncLoops.count(L))
1385 assert(LatchBlock &&
"PostInc mode requires a unique loop latch!");
1391 if (isa<Instruction>(Result) &&
1392 !SE.DT.dominates(cast<Instruction>(Result),
1393 &*Builder.GetInsertPoint())) {
1406 Step = SE.getNegativeSCEV(Step);
1410 SCEVInsertPointGuard Guard(Builder,
this);
1413 Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
1422 if (ResTy != SE.getEffectiveSCEVType(ResTy))
1423 Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
1425 if (TruncTy != Result->
getType()) {
1426 Result = Builder.CreateTrunc(Result, TruncTy);
1427 rememberInstruction(Result);
1431 Result = Builder.CreateSub(expandCodeFor(Normalized->
getStart(), TruncTy),
1433 rememberInstruction(Result);
1438 if (PostLoopScale) {
1439 assert(S->
isAffine() &&
"Can't linearly scale non-affine recurrences.");
1440 Result = InsertNoopCastOfTo(Result, IntTy);
1441 Result = Builder.CreateMul(Result,
1442 expandCodeFor(PostLoopScale, IntTy));
1443 rememberInstruction(Result);
1447 if (PostLoopOffset) {
1448 if (
PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
1450 Value *
Base = expandCodeFor(PostLoopOffset, ExpandTy);
1451 Result = expandAddToGEP(SE.getUnknown(Result), PTy, IntTy, Base);
1453 Result = expandAddToGEP(PostLoopOffset, PTy, IntTy, Result);
1456 Result = InsertNoopCastOfTo(Result, IntTy);
1457 Result = Builder.CreateAdd(Result,
1458 expandCodeFor(PostLoopOffset, IntTy));
1459 rememberInstruction(Result);
1467 if (!CanonicalMode)
return expandAddRecExprLiterally(S);
1473 PHINode *CanonicalIV =
nullptr;
1475 if (SE.getTypeSizeInBits(PN->
getType()) >= SE.getTypeSizeInBits(Ty))
1481 SE.getTypeSizeInBits(CanonicalIV->
getType()) >
1482 SE.getTypeSizeInBits(Ty)) {
1485 NewOps[i] = SE.getAnyExtendExpr(S->
op_begin()[i], CanonicalIV->
getType());
1490 V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty),
nullptr,
1498 NewOps[0] = SE.getConstant(Ty, 0);
1499 const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
1506 const SCEV *ExposedRest = Rest;
1513 if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
1515 assert(StartV->
getType() == PTy &&
"Pointer type mismatch for GEP!");
1516 return expandAddToGEP(ExposedRest, PTy, Ty, StartV);
1525 const SCEV *AddExprRHS = SE.getUnknown(
expand(Rest));
1526 return expand(SE.getAddExpr(AddExprLHS, AddExprRHS));
1537 rememberInstruction(CanonicalIV);
1543 if (!PredSeen.
insert(HP).second) {
1557 rememberInstruction(Add);
1567 assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->
getType()) &&
1568 "IVs with types different from the canonical IV should " 1569 "already have been handled!");
1578 expand(SE.getTruncateOrNoop(
1579 SE.getMulExpr(SE.getUnknown(CanonicalIV),
1588 const SCEV *IH = SE.getUnknown(CanonicalIV);
1591 const SCEV *NewS = S;
1592 const SCEV *
Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->
getType());
1593 if (isa<SCEVAddRecExpr>(Ext))
1596 const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
1600 const SCEV *
T = SE.getTruncateOrNoop(V, Ty);
1608 Value *I = Builder.CreateTrunc(V, Ty);
1609 rememberInstruction(I);
1617 Value *I = Builder.CreateZExt(V, Ty);
1618 rememberInstruction(I);
1626 Value *I = Builder.CreateSExt(V, Ty);
1627 rememberInstruction(I);
1638 Ty = SE.getEffectiveSCEVType(Ty);
1639 LHS = InsertNoopCastOfTo(LHS, Ty);
1642 Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
1643 rememberInstruction(ICmp);
1644 Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS,
"smax");
1645 rememberInstruction(Sel);
1651 LHS = InsertNoopCastOfTo(LHS, S->
getType());
1662 Ty = SE.getEffectiveSCEVType(Ty);
1663 LHS = InsertNoopCastOfTo(LHS, Ty);
1666 Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
1667 rememberInstruction(ICmp);
1668 Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS,
"umax");
1669 rememberInstruction(Sel);
1675 LHS = InsertNoopCastOfTo(LHS, S->
getType());
1682 return expandCodeFor(SH, Ty);
1689 assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->
getType()) &&
1690 "non-trivial casts should be done with the SCEVs directly!");
1691 V = InsertNoopCastOfTo(V, Ty);
1696 ScalarEvolution::ValueOffsetPair
1697 SCEVExpander::FindValueInExprValueMap(
const SCEV *S,
1702 if (CanonicalMode || !SE.containsAddRecurrence(S)) {
1708 for (
auto const &VOPair : *Set) {
1709 Value *V = VOPair.first;
1712 if (V && isa<Instruction>(V) && (EntInst = cast<Instruction>(V)) &&
1715 SE.DT.dominates(EntInst, InsertPt) &&
1716 (SE.LI.getLoopFor(EntInst->
getParent()) ==
nullptr ||
1722 return {
nullptr,
nullptr};
1731 Value *SCEVExpander::expand(
const SCEV *S) {
1734 Instruction *InsertPt = &*Builder.GetInsertPoint();
1735 for (
Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
1737 if (SE.isLoopInvariant(S, L)) {
1740 InsertPt = Preheader->getTerminator();
1750 auto SafeToHoist = [](
const SCEV *S) {
1752 if (
const auto *
D = dyn_cast<SCEVUDivExpr>(S)) {
1753 if (
const auto *
SC = dyn_cast<SCEVConstant>(
D->getRHS()))
1755 return SC->getValue()->isZero();
1768 if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L) &&
1771 while (InsertPt->
getIterator() != Builder.GetInsertPoint() &&
1772 (isInsertedInstruction(InsertPt) ||
1773 isa<DbgInfoIntrinsic>(InsertPt))) {
1780 auto I = InsertedExpressions.find(std::make_pair(S, InsertPt));
1781 if (I != InsertedExpressions.end())
1784 SCEVInsertPointGuard Guard(Builder,
this);
1785 Builder.SetInsertPoint(InsertPt);
1788 ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, InsertPt);
1789 Value *V = VO.first;
1793 else if (VO.second) {
1796 int64_t
Offset = VO.second->getSExtValue();
1797 int64_t ESize = SE.getTypeSizeInBits(Ety);
1798 if ((Offset * 8) % ESize == 0) {
1801 V = Builder.CreateGEP(Ety, V, Idx,
"scevgep");
1805 unsigned AS = Vty->getAddressSpace();
1809 V = Builder.CreateBitCast(V, Vty);
1812 V = Builder.CreateSub(V, VO.second);
1821 InsertedExpressions[std::make_pair(S, InsertPt)] = V;
1825 void SCEVExpander::rememberInstruction(
Value *I) {
1826 if (!PostIncLoops.empty())
1827 InsertedPostIncValues.insert(I);
1829 InsertedValues.insert(I);
1843 const SCEV *
H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
1847 SCEVInsertPointGuard Guard(Builder,
this);
1849 cast<PHINode>(expandCodeFor(H,
nullptr, &L->
getHeader()->
front()));
1878 unsigned NumElim = 0;
1886 if (!SE.isSCEVable(PN->
getType()))
1891 return Const->getValue();
1897 if (V->
getType() != Phi->getType())
1899 Phi->replaceAllUsesWith(V);
1903 <<
"INDVARS: Eliminated constant iv: " << *Phi <<
'\n');
1907 if (!SE.isSCEVable(Phi->getType()))
1910 PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
1913 if (Phi->getType()->isIntegerTy() && TTI &&
1917 const SCEV *TruncExpr =
1918 SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
1919 ExprToIVMap[TruncExpr] = Phi;
1935 if (OrigInc && IsomorphicInc) {
1939 if (OrigPhiRef->
getType() == Phi->getType() &&
1940 !(ChainedPhis.count(Phi) ||
1941 isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)) &&
1942 (ChainedPhis.count(Phi) ||
1943 isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
1954 const SCEV *TruncExpr =
1955 SE.getTruncateOrNoop(SE.getSCEV(OrigInc), IsomorphicInc->getType());
1956 if (OrigInc != IsomorphicInc &&
1957 TruncExpr == SE.getSCEV(IsomorphicInc) &&
1958 SE.LI.replacementPreservesLCSSAForm(IsomorphicInc, OrigInc) &&
1959 hoistIVInc(OrigInc, IsomorphicInc)) {
1961 dbgs() <<
"INDVARS: Eliminated congruent iv.inc: " 1962 << *IsomorphicInc <<
'\n');
1963 Value *NewInc = OrigInc;
1964 if (OrigInc->
getType() != IsomorphicInc->getType()) {
1966 if (
PHINode *PN = dyn_cast<PHINode>(OrigInc))
1974 OrigInc, IsomorphicInc->
getType(), IVName);
1984 Value *NewIV = OrigPhiRef;
1985 if (OrigPhiRef->
getType() != Phi->getType()) {
1988 NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->
getType(), IVName);
1999 getRelatedExistingExpansion(S, At, L);
2000 if (VO && VO.
getValue().second ==
nullptr)
2019 if (!
match(BB->getTerminator(),
2024 if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
2025 return ScalarEvolution::ValueOffsetPair(LHS,
nullptr);
2027 if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
2028 return ScalarEvolution::ValueOffsetPair(RHS,
nullptr);
2033 ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, At);
2044 bool SCEVExpander::isHighCostExpansionHelper(
2050 if (At && getRelatedExistingExpansion(S, At, L))
2059 return isHighCostExpansionHelper(cast<SCEVTruncateExpr>(S)->getOperand(),
2062 return isHighCostExpansionHelper(cast<SCEVZeroExtendExpr>(S)->getOperand(),
2065 return isHighCostExpansionHelper(cast<SCEVSignExtendExpr>(S)->getOperand(),
2069 if (!Processed.
insert(S).second)
2072 if (
auto *UDivExpr = dyn_cast<SCEVUDivExpr>(S)) {
2076 if (
auto *
SC = dyn_cast<SCEVConstant>(UDivExpr->getRHS()))
2077 if (
SC->getAPInt().isPowerOf2()) {
2080 unsigned Width = cast<IntegerType>(UDivExpr->getType())->
getBitWidth();
2097 At = &ExitingBB->
back();
2098 if (!getRelatedExistingExpansion(
2099 SE.getAddExpr(S, SE.getConstant(S->
getType(), 1)), At, L))
2105 if (isa<SCEVSMaxExpr>(S) || isa<SCEVUMaxExpr>(S))
2111 if (
const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(S)) {
2112 for (
auto *Op : NAry->operands())
2113 if (isHighCostExpansionHelper(Op, L, At, Processed))
2127 return expandUnionPredicate(cast<SCEVUnionPredicate>(Pred), IP);
2129 return expandEqualPredicate(cast<SCEVEqualPredicate>(Pred), IP);
2131 auto *AddRecPred = cast<SCEVWrapPredicate>(Pred);
2132 return expandWrapPredicate(AddRecPred, IP);
2143 Builder.SetInsertPoint(IP);
2144 auto *I = Builder.CreateICmpNE(Expr0, Expr1,
"ident.check");
2151 "non-affine expression");
2154 const SCEV *ExitCount =
2155 SE.getPredicatedBackedgeTakenCount(AR->
getLoop(), Pred);
2157 assert(ExitCount != SE.getCouldNotCompute() &&
"Invalid loop count");
2163 unsigned SrcBits = SE.getTypeSizeInBits(ExitCount->
getType());
2164 unsigned DstBits = SE.getTypeSizeInBits(ARTy);
2172 Builder.SetInsertPoint(Loc);
2173 Value *TripCountVal = expandCodeFor(ExitCount, CountTy, Loc);
2177 Type *ARExpandTy = DL.isNonIntegralPointerType(ARTy) ? ARTy : Ty;
2179 Value *StepValue = expandCodeFor(Step, Ty, Loc);
2180 Value *NegStepValue = expandCodeFor(SE.getNegativeSCEV(Step), Ty, Loc);
2181 Value *StartValue = expandCodeFor(Start, ARExpandTy, Loc);
2186 Builder.SetInsertPoint(Loc);
2189 Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);
2192 Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);
2197 CallInst *Mul = Builder.CreateCall(MulF, {AbsStep, TruncTripCount},
"mul");
2198 Value *MulV = Builder.CreateExtractValue(Mul, 0,
"mul.result");
2199 Value *OfMul = Builder.CreateExtractValue(Mul, 1,
"mul.overflow");
2204 Value *
Add =
nullptr, *Sub =
nullptr;
2205 if (
PointerType *ARPtrTy = dyn_cast<PointerType>(ARExpandTy)) {
2206 const SCEV *MulS = SE.getSCEV(MulV);
2207 const SCEV *NegMulS = SE.getNegativeSCEV(MulS);
2208 Add = Builder.CreateBitCast(expandAddToGEP(MulS, ARPtrTy, Ty, StartValue),
2210 Sub = Builder.CreateBitCast(
2211 expandAddToGEP(NegMulS, ARPtrTy, Ty, StartValue), ARPtrTy);
2213 Add = Builder.CreateAdd(StartValue, MulV);
2214 Sub = Builder.CreateSub(StartValue, MulV);
2217 Value *EndCompareGT = Builder.CreateICmp(
2220 Value *EndCompareLT = Builder.CreateICmp(
2225 Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);
2230 if (SE.getTypeSizeInBits(CountTy) > SE.getTypeSizeInBits(Ty)) {
2232 auto *BackedgeCheck =
2233 Builder.CreateICmp(ICmpInst::ICMP_UGT, TripCountVal,
2235 BackedgeCheck = Builder.CreateAnd(
2238 EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
2241 EndCheck = Builder.CreateOr(EndCheck, OfMul);
2247 const auto *A = cast<SCEVAddRecExpr>(Pred->
getExpr());
2248 Value *NSSWCheck =
nullptr, *NUSWCheck =
nullptr;
2252 NUSWCheck = generateOverflowCheck(A, IP,
false);
2256 NSSWCheck = generateOverflowCheck(A, IP,
true);
2258 if (NUSWCheck && NSSWCheck)
2259 return Builder.CreateOr(NUSWCheck, NSSWCheck);
2277 auto *NextCheck = expandCodeForPredicate(Pred, IP);
2278 Builder.SetInsertPoint(IP);
2279 Check = Builder.CreateOr(Check, NextCheck);
2306 struct SCEVFindUnsafe {
2312 bool follow(
const SCEV *S) {
2321 const SCEV *Step = AR->getStepRecurrence(SE);
2322 if (!AR->isAffine() && !SE.
dominates(Step, AR->getLoop()->getHeader())) {
2329 bool isDone()
const {
return IsUnsafe; }
2335 SCEVFindUnsafe Search(SE);
2337 return !Search.IsUnsafe;
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
const SCEV * getTruncateOrNoop(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
static bool Check(DecodeStatus &Out, DecodeStatus In)
A parsed version of the target data layout string in and methods for querying it. ...
bool hoistIVInc(Instruction *IncV, Instruction *InsertPos)
Utility for hoisting an IV increment.
static ConstantInt * getFalse(LLVMContext &Context)
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static IntegerType * getInt1Ty(LLVMContext &C)
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
Value * getExactExistingExpansion(const SCEV *S, const Instruction *At, Loop *L)
Try to find existing LLVM IR value for S available at the point At.
This class represents an incoming formal argument to a Function.
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
const SCEV * getConstant(ConstantInt *V)
This class represents lattice values for constants.
Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant *> IdxList, bool InBounds=false, Optional< unsigned > InRangeIndex=None, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
bool isSized(SmallPtrSetImpl< Type *> *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
bool isSafeToExpandAt(const SCEV *S, const Instruction *InsertionPoint, ScalarEvolution &SE)
Return true if the given expression is safe to expand in the sense that all materialized values are d...
const SCEV * normalizeForPostIncUse(const SCEV *S, const PostIncLoopSet &Loops, ScalarEvolution &SE)
Normalize S to be post-increment for all loops present in Loops.
size_t getNumOperands() const
void push_back(const T &Elt)
The main scalar evolution driver.
APInt zext(unsigned width) const
Zero extend to a new width.
bool isZero() const
Return true if the expression is a constant zero.
This class represents a function call, abstracting a target machine's calling convention.
BlockT * getLoopPreheader() const
If there is a preheader for this loop, return it.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space...
Optional< ScalarEvolution::ValueOffsetPair > getRelatedExistingExpansion(const SCEV *S, const Instruction *At, Loop *L)
Try to find the ValueOffsetPair for S.
LLVMContext & getContext() const
All values hold a context through their type.
This class represents a truncation of an integer value to a smaller integer value.
Value * expandWrapPredicate(const SCEVWrapPredicate *P, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
const SCEV * getOperand() const
static void SimplifyAddOperands(SmallVectorImpl< const SCEV *> &Ops, Type *Ty, ScalarEvolution &SE)
SimplifyAddOperands - Sort and simplify a list of add operands.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
bool isVectorTy() const
True if this is an instance of VectorType.
unsigned getElementContainingOffset(uint64_t Offset) const
Given a valid byte offset into the structure, returns the structure index that contains it...
This is the base class for unary cast operator classes.
return AArch64::GPR64RegClass contains(Reg)
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
iterator begin()
Instruction iterator methods.
#define DEBUG_WITH_TYPE(TYPE, X)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
bool match(Val *V, const Pattern &P)
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
Value * expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc)
Generates a code sequence that evaluates this predicate.
Type * getPointerElementType() const
const Loop * getLoop() const
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static const Loop * PickMostRelevantLoop(const Loop *A, const Loop *B, DominatorTree &DT)
PickMostRelevantLoop - Given two loops pick the one that's most relevant for SCEV expansion...
This is the base class for all instructions that perform data casts.
Class to represent struct types.
A Use represents the edge between a Value definition and its users.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
bool isIntegerTy() const
True if this is an instance of IntegerType.
LLVMContext & getContext() const
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
This node represents multiplication of some number of SCEVs.
Value * generateOverflowCheck(const SCEVAddRecExpr *AR, Instruction *Loc, bool Signed)
Generates code that evaluates if the AR expression will overflow.
const APInt & getAPInt() const
BlockT * getHeader() const
ConstantInt * getValue() const
A constant value that is initialized with an expression using other constant values.
Type * getType() const
All values are typed, get the type of this value.
This node represents a polynomial recurrence on the trip count of the specified loop.
const T & getValue() const LLVM_LVALUE_FUNCTION
Class to represent array types.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
op_iterator op_begin() const
void SetCurrentDebugLocation(DebugLoc L)
Set location information used by debugging information.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
void takeName(Value *V)
Transfer the name from V to this value.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree...
Function * getDeclaration(Module *M, ID id, ArrayRef< Type *> Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
const SCEV * getAddRecExpr(const SCEV *Start, const SCEV *Step, const Loop *L, SCEV::NoWrapFlags Flags)
Get an add recurrence expression for the specified loop.
static BinaryOperator * CreateAdd(Value *S1, Value *S2, const Twine &Name, Instruction *InsertBefore, Value *FlagsOp)
Value * getOperand(unsigned i) const
Class to represent pointers.
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
const SCEV * getOperand(unsigned i) const
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
SCEVPredicateKind getKind() const
LLVM Basic Block Representation.
This class represents a binary unsigned division operation.
The instances of the Type class are immutable: once they are created, they are never changed...
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This is an important base class in LLVM.
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator begin()
Value * getIncomingValueForBlock(const BasicBlock *BB) const
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
bool isPointerTy() const
True if this is an instance of PointerType.
const Instruction & front() const
const SCEV * getExpr() const override
Implementation of the SCEVPredicate interface.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool mayHaveSideEffects() const
Return true if the instruction may have side effects.
const SCEV * getAddExpr(SmallVectorImpl< const SCEV *> &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
const SCEV * getLHS() const
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
brc_match< Cond_t > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly...
const Instruction & back() const
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder, const SCEV *Factor, ScalarEvolution &SE, const DataLayout &DL)
FactorOutConstant - Test if S is divisible by Factor, using signed division.
Value * expandCodeFor(const SCEV *SH, Type *Ty, Instruction *I)
Insert code to directly compute the specified SCEV expression into the program.
op_iterator op_end() const
bool SCEVExprContains(const SCEV *Root, PredTy Pred)
Return true if any node in Root satisfies the predicate Pred.
Interval::pred_iterator pred_end(Interval *I)
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values...
unsigned getAddressSpace() const
Return the address space of the Pointer type.
self_iterator getIterator()
Class to represent integer types.
const SCEV * getStart() const
std::pair< NoneType, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
static Expected< BitVector > expand(StringRef S, StringRef Original)
const Function * getFunction() const
Return the function this instruction belongs to.
const SCEV * getLHS() const
Returns the left hand side of the equality.
void getExitingBlocks(SmallVectorImpl< BlockT *> &ExitingBlocks) const
Return all blocks inside the loop that have successors outside of the loop.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
const SCEV * getRHS() const
Returns the right hand side of the equality.
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
const SCEV * getMulExpr(SmallVectorImpl< const SCEV *> &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
static void SplitAddRecs(SmallVectorImpl< const SCEV *> &Ops, Type *Ty, ScalarEvolution &SE)
SplitAddRecs - Flatten a list of add operands, moving addrec start values out to the top level...
This class represents an assumption made using SCEV expressions which can be checked at run-time...
static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR)
void sort(IteratorTy Start, IteratorTy End)
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest, ScalarEvolution &SE)
Move parts of Base into Rest to leave Base with the minimal expression that provides a pointer operan...
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
unsigned getSCEVType() const
bool isNonConstantNegative() const
Return true if the specified scev is negated, but not a constant.
unsigned getNumOperands() const
static PointerType * getInt1PtrTy(LLVMContext &C, unsigned AS=0)
This is the shared class of boolean and integer constants.
Type * getType() const
Return the LLVM type of this SCEV expression.
PHINode * getOrInsertCanonicalInductionVariable(const Loop *L, Type *Ty)
This method returns the canonical induction variable of the specified type for the specified loop (in...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
bool dominates(const Instruction *Def, const Use &U) const
Return true if Def dominates a use in User.
Module.h This file contains the declarations for the Module class.
uint64_t getSizeInBytes() const
CHAIN = SC CHAIN, Imm128 - System call.
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
unsigned logBase2() const
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
PHINode * getCanonicalInductionVariable() const
Check to see if the loop has a canonical induction variable: an integer recurrence that starts at 0 a...
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not belong to a module.
Class for arbitrary precision integers.
This node represents an addition of some number of SCEVs.
static BasicBlock::iterator findInsertPointAfter(Instruction *I, BasicBlock *MustDominate)
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
This class represents a signed maximum selection.
iterator_range< user_iterator > users()
InstListType::iterator iterator
Instruction iterators...
Value * expandUnionPredicate(const SCEVUnionPredicate *Pred, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
static Constant * getCast(unsigned ops, Constant *C, Type *Ty, bool OnlyIfReduced=false)
Convenience function for getting a Cast operation.
void visitAll(const SCEV *Root, SV &Visitor)
Use SCEVTraversal to visit all nodes in the given expression tree.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
This class represents a zero extension of a small integer value to a larger integer value...
Value * CreateTruncOrBitCast(Value *V, Type *DestTy, const Twine &Name="")
static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR)
LoopT * getParentLoop() const
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator end()
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
uint64_t getElementOffset(unsigned Idx) const
void emplace_back(ArgTypes &&... Args)
This class represents an analyzed expression in the program.
static IntegerType * getInt32Ty(LLVMContext &C)
unsigned getIntegerBitWidth() const
LLVM_NODISCARD bool empty() const
Represents a single loop in the control flow graph.
StringRef getName() const
Return a constant reference to the value's name.
const Function * getParent() const
Return the enclosing method, or null if none.
static Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
bool isZero() const
This is just a convenience method to make client code smaller for a common case.
This class represents a sign extension of a small integer value to a larger integer value...
This class represents an unsigned maximum selection.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Instruction * getIVIncOperand(Instruction *IncV, Instruction *InsertPos, bool allowScale)
Return the induction variable increment's IV operand.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
const SCEV * getRHS() const
unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT, SmallVectorImpl< WeakTrackingVH > &DeadInsts, const TargetTransformInfo *TTI=nullptr)
replace congruent phis with their most canonical representative.
const SmallVectorImpl< const SCEVPredicate * > & getPredicates() const
reverse_iterator rbegin()
assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef())
This class represents a composition of other SCEV predicates, and is the class that most clients will...
bool isOne() const
Return true if the expression is a constant one.
unsigned getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
Module * getParent()
Get the module that this global value is contained inside of...
Value * expandEqualPredicate(const SCEVEqualPredicate *Pred, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
LLVM Value Representation.
A vector that has set insertion semantics.
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
static Value * SimplifyPHINode(PHINode *PN, const SimplifyQuery &Q)
See if we can fold the given phi. If not, returns null.
bool dominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV dominate the specified basic block...
bool isIllegalInteger(uint64_t Width) const
static APInt getNullValue(unsigned numBits)
Get the '0' value.
This node is a base class providing common functionality for n'ary operators.
This class represents an assumption made on an AddRec expression.
bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE)
Return true if the given expression is safe to expand in the sense that all materialized values are s...
Value * SimplifyInstruction(Instruction *I, const SimplifyQuery &Q, OptimizationRemarkEmitter *ORE=nullptr)
See if we can compute a simplified version of this instruction.
BlockT * getExitingBlock() const
If getExitingBlocks would return exactly one block, return that block.
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
This class represents an assumption that two SCEV expressions are equal, and this can be checked at r...
static IntegerType * getInt8Ty(LLVMContext &C)
IncrementWrapFlags getFlags() const
Returns the set assumed no overflow flags.
Type * getElementType() const
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
const SCEV * getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
const BasicBlock * getParent() const
static bool canBeCheaplyTransformed(ScalarEvolution &SE, const SCEVAddRecExpr *Phi, const SCEVAddRecExpr *Requested, bool &InvertStep)
Check whether we can cheaply express the requested SCEV in terms of the available PHI SCEV by truncat...
This class represents a constant integer value.
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate > m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)
static Constant * get(unsigned Opcode, Constant *C1, unsigned Flags=0, Type *OnlyIfReducedTy=nullptr)
get - Return a unary operator constant expression, folding if possible.