#include "llvm/Config/llvm-config.h"
// ...

#define DEBUG_TYPE "memoryssa"

// ...
INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
                    "Memory SSA Printer", false, false)

static cl::opt<unsigned> MaxCheckLimit(
    "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));

// Always verify MemorySSA if expensive checking is enabled.
#ifdef EXPENSIVE_CHECKS
bool llvm::VerifyMemorySSA = true;
#else
bool llvm::VerifyMemorySSA = false;
#endif

// ... (MemorySSAAnnotatedWriter)
  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
class MemoryLocOrCall {
public:
  bool IsCall = false;

  MemoryLocOrCall(MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}
  MemoryLocOrCall(const MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}

  MemoryLocOrCall(Instruction *Inst) {
    if (auto *C = dyn_cast<CallBase>(Inst)) {
      IsCall = true;
      Call = C;
    } else {
      IsCall = false;
      // There is no such thing as a MemoryLocation for a fence instruction;
      // it is unique in that regard.
      if (!isa<FenceInst>(Inst))
        Loc = MemoryLocation::get(Inst);
    }
  }

  // ... (getCall/getLoc accessors)

  bool operator==(const MemoryLocOrCall &Other) const {
    if (IsCall != Other.IsCall)
      return false;

    if (!IsCall)
      return Loc == Other.Loc;

    if (Call->getCalledValue() != Other.Call->getCalledValue())
      return false;

    return Call->arg_size() == Other.Call->arg_size() &&
           std::equal(Call->arg_begin(), Call->arg_end(),
                      Other.Call->arg_begin());
  }

private:
  union {
    const CallBase *Call;
    MemoryLocation Loc;
  };
};
  // ... (DenseMapInfo<MemoryLocOrCall>)
  static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
    if (!MLOC.IsCall)
      return hash_combine(
          MLOC.IsCall,
          DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));

    hash_code hash =
        hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
                                      MLOC.getCall()->getCalledValue()));

    for (const Value *Arg : MLOC.getCall()->args())
      hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
    return hash;
  }

  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
    return LHS == RHS;
  }
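For readers unfamiliar with the DenseMapInfo protocol used above: a custom key type needs designated empty and tombstone keys plus hash and equality hooks. A self-contained sketch of the same pattern with an invented IntPairKey type (not from this file):

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include <climits>

struct IntPairKey {
  int A = 0, B = 0;
  bool operator==(const IntPairKey &O) const { return A == O.A && B == O.B; }
};

namespace llvm {
template <> struct DenseMapInfo<IntPairKey> {
  // Two values that can never be real keys, marking empty/erased slots.
  static IntPairKey getEmptyKey() { return {INT_MIN, 0}; }
  static IntPairKey getTombstoneKey() { return {INT_MIN, 1}; }
  static unsigned getHashValue(const IntPairKey &K) {
    return hash_combine(K.A, K.B); // same combinator as getHashValue above
  }
  static bool isEqual(const IntPairKey &L, const IntPairKey &R) {
    return L == R;
  }
};
} // namespace llvm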
static bool areLoadsReorderable(const LoadInst *Use,
                                const LoadInst *MayClobber) {
  bool VolatileUse = Use->isVolatile();
  bool VolatileClobber = MayClobber->isVolatile();
  // Volatile operations may never be reordered with other volatile operations.
  if (VolatileUse && VolatileClobber)
    return false;
  // ...
  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
                                                     AtomicOrdering::Acquire);
  return !(SeqCstUse || MayClobberIsAcquire);
}
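The final ordering check in isolation, as a hedged sketch (ordersAllowReordering is an invented name; the real function also handles the volatile case elided above):

#include "llvm/Support/AtomicOrdering.h"
using namespace llvm;

static bool ordersAllowReordering(AtomicOrdering UseOrd,
                                  AtomicOrdering ClobberOrd) {
  // A seq_cst use, or an acquire-or-stronger clobber, pins the order; anything
  // weaker may be hoisted past.
  bool SeqCstUse = UseOrd == AtomicOrdering::SequentiallyConsistent;
  bool ClobberIsAcquire =
      isAtLeastOrStrongerThan(ClobberOrd, AtomicOrdering::Acquire);
  return !(SeqCstUse || ClobberIsAcquire);
}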
struct ClobberAlias {
  bool IsClobber;
  Optional<AliasResult> AR;
};

static ClobberAlias instructionClobbersQuery(const MemoryDef *MD,
                                             const MemoryLocation &UseLoc,
                                             const Instruction *UseInst,
                                             AliasAnalysis &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");
  // ...

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
    // These intrinsics will show up as affecting memory, but they are just
    // markers.
    switch (II->getIntrinsicID()) {
    // ...
    }
  }

  // ...
  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
    if (auto *UseLoad = dyn_cast<LoadInst>(UseInst))
      return {!areLoadsReorderable(UseLoad, DefLoad), MayAlias};

  // ...
}

static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
                                             const MemoryUseOrDef *MU,
                                             const MemoryLocOrCall &UseMLOC,
                                             AliasAnalysis &AA) {
  // ...
}
struct UpwardsMemoryQuery {
  // True if our original query started off as a call.
  bool IsCall = false;
  // The pointer location we started the query with. Empty if IsCall is true.
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst = nullptr;
  // The MemoryAccess we actually got called with, used to test local
  // domination.
  const MemoryAccess *OriginalAccess = nullptr;
  Optional<AliasResult> AR = MayAlias;
  bool SkipSelfAccess = false;

  UpwardsMemoryQuery() = default;

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
    if (!IsCall)
      StartingLoc = MemoryLocation::get(Inst);
  }
};

static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
                           AliasAnalysis &AA) {
  Instruction *Inst = MD->getMemoryInst();
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_end:
      return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc);
    default:
      return false;
    }
  }
  return false;
}
/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// inbetween `Start` and `ClobberAt` can clobbers `Start`.
static void checkClobberSanity(const MemoryAccess *Start,
                               MemoryAccess *ClobberAt,
                               const MemoryLocation &StartLoc,
                               const MemorySSA &MSSA,
                               const UpwardsMemoryQuery &Query,
                               AliasAnalysis &AA,
                               bool AllowImpreciseClobber = false) {
  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");

  if (MSSA.isLiveOnEntryDef(Start)) {
    assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
           "liveOnEntry must clobber itself");
    return;
  }

  bool FoundClobber = false;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;
  SmallVector<ConstMemoryAccessPair, 8> Worklist;
  Worklist.emplace_back(Start, StartLoc);
  // Walk all paths from Start to ClobberAt, while looking for clobbers.
  while (!Worklist.empty()) {
    auto MAP = Worklist.pop_back_val();
    // We learn nothing from revisiting nodes.
    if (!VisitedPhis.insert(MAP).second)
      continue;

    for (const auto *MA : def_chain(MAP.first)) {
      if (MA == ClobberAt) {
        if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
          // instructionClobbersQuery isn't essentially free, so don't use `|=`,
          // since it won't properly short-circuit.
          bool IsClobber =
              instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
                  .IsClobber;
          FoundClobber = FoundClobber || IsClobber;
        }
        break;
      }

      // We should never hit liveOnEntry, unless it's the clobber.
      assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");

      if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
        // If Start is a Def, skip self.
        if (MD == Start)
          continue;

        assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
                    .IsClobber &&
               "Found clobber before reaching ClobberAt!");
        continue;
      }

      if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
        (void)MU;
        assert(MU == Start &&
               "Can only find use in def chain if Start is a use");
        continue;
      }

      assert(isa<MemoryPhi>(MA));
      // ... (queue this phi's incoming {access, location} pairs)
    }
  }

  // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
  // clobber.
  if (AllowImpreciseClobber)
    return;

  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
         "ClobberAt never acted as a clobber");
}
class ClobberWalker {
  /// Save a few bytes by using unsigned instead of size_t.
  using ListIndex = unsigned;

  /// Represents a span of contiguous MemoryDefs, potentially ending in a
  /// MemoryPhi.
  struct DefPath {
    MemoryLocation Loc;
    MemoryAccess *First;
    MemoryAccess *Last;
    Optional<ListIndex> Previous;

    DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
            Optional<ListIndex> Previous)
        : Loc(Loc), First(First), Last(Last), Previous(Previous) {}

    DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
            Optional<ListIndex> Previous)
        : DefPath(Loc, Init, Init, Previous) {}
  };

  const MemorySSA &MSSA;
  AliasAnalysis &AA;
  DominatorTree &DT;
  UpwardsMemoryQuery *Query;

  // Phi optimization bookkeeping.
  SmallVector<DefPath, 32> Paths;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;

  /// Find the nearest def or phi that `From` can legally be optimized to.
  const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
    auto *Node = DT.getNode(From->getBlock());
    while ((Node = Node->getIDom())) {
      if (auto *Defs = MSSA.getBlockDefs(Node->getBlock()))
        return &*Defs->rbegin();
    }
    return MSSA.getLiveOnEntryDef();
  }

  /// Result of calling walkToPhiOrClobber.
  struct UpwardsWalkResult {
    /// The "Result" of the walk: either a clobber, the last thing we walked,
    /// or both.
    MemoryAccess *Result;
    bool IsKnownClobber;
    Optional<AliasResult> AR;
  };

  /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last,
  /// updating Desc.Last as it goes. Optionally stops at StopAt/SkipStopAt;
  /// this does not test whether StopAt is a clobber.
  UpwardsWalkResult
  walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
                     const MemoryAccess *SkipStopAt = nullptr) const {
    assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");

    for (MemoryAccess *Current : def_chain(Desc.Last)) {
      Desc.Last = Current;
      if (Current == StopAt || Current == SkipStopAt)
        return {Current, false, MayAlias};

      if (auto *MD = dyn_cast<MemoryDef>(Current)) {
        if (MSSA.isLiveOnEntryDef(MD))
          return {MD, true, MustAlias};
        ClobberAlias CA =
            instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
        if (CA.IsClobber)
          return {MD, true, CA.AR};
      }
    }

    assert(isa<MemoryPhi>(Desc.Last) &&
           "Ended at a non-clobber that's not a phi?");
    return {Desc.Last, false, MayAlias};
  }
  void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                   ListIndex PriorNode) {
    auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}),
                                 upward_defs_end());
    for (const MemoryAccessPair &P : UpwardDefs) {
      PausedSearches.push_back(Paths.size());
      Paths.emplace_back(P.second, P.first, PriorNode);
    }
  }

  /// Represents a search that terminated after finding a clobber.
  struct TerminatedPath {
    MemoryAccess *Clobber;
    ListIndex LastNode;
  };

  Optional<TerminatedPath>
  getBlockingAccess(const MemoryAccess *StopWhere,
                    SmallVectorImpl<ListIndex> &PausedSearches,
                    SmallVectorImpl<ListIndex> &NewPaused,
                    SmallVectorImpl<TerminatedPath> &Terminated) {
    assert(!PausedSearches.empty() && "No searches to continue?");

    // BFS vs DFS really doesn't make a difference here, so just do a DFS with
    // PausedSearches as our stack.
    while (!PausedSearches.empty()) {
      ListIndex PathIndex = PausedSearches.pop_back_val();
      DefPath &Node = Paths[PathIndex];

      // If we've already visited this path with this MemoryLocation, we don't
      // need to do so again.
      if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
        continue;

      const MemoryAccess *SkipStopWhere = nullptr;
      if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
        assert(isa<MemoryDef>(Query->OriginalAccess));
        SkipStopWhere = Query->OriginalAccess;
      }

      UpwardsWalkResult Res = walkToPhiOrClobber(Node, /*StopAt=*/StopWhere,
                                                 /*SkipStopAt=*/SkipStopWhere);
      if (Res.IsKnownClobber) {
        assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);

        // If this wasn't a cache hit, we hit a clobber when walking. That's a
        // failure.
        TerminatedPath Term{Res.Result, PathIndex};
        if (!MSSA.dominates(Res.Result, StopWhere))
          return Term;

        // Otherwise, it's a valid thing to potentially optimize to.
        Terminated.push_back(Term);
        continue;
      }

      if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
        // We've hit our target. Save this path off for if we want to continue
        // walking. If we are in the mode of skipping the OriginalAccess and we
        // have reached back to it, do not save the path; we've just looped
        // back to self.
        if (Res.Result != SkipStopWhere)
          NewPaused.push_back(PathIndex);
        continue;
      }

      assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
      addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
    }

    return None;
  }
  template <typename T, typename Walker>
  struct generic_def_path_iterator
      : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
                                    std::forward_iterator_tag, T *> {
    generic_def_path_iterator() = default;
    generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}

    T &operator*() const { return curNode(); }

    generic_def_path_iterator &operator++() {
      N = curNode().Previous;
      return *this;
    }

    bool operator==(const generic_def_path_iterator &O) const {
      if (N.hasValue() != O.N.hasValue())
        return false;
      return !N.hasValue() || *N == *O.N;
    }

  private:
    T &curNode() const { return W->Paths[*N]; }

    Walker *W = nullptr;
    Optional<ListIndex> N = None;
  };

  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
  using const_def_path_iterator =
      generic_def_path_iterator<const DefPath, const ClobberWalker>;

  iterator_range<def_path_iterator> def_path(ListIndex From) {
    return make_range(def_path_iterator(this, From), def_path_iterator());
  }

  iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
    return make_range(const_def_path_iterator(this, From),
                      const_def_path_iterator());
  }
  struct OptznResult {
    TerminatedPath PrimaryClobber;
    SmallVector<TerminatedPath, 4> OtherClobbers;
  };

  ListIndex defPathIndex(const DefPath &N) const {
    // The assert looks nicer if we don't need to do &N.
    const DefPath *NP = &N;
    assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
           "Out of bounds DefPath!");
    return NP - &Paths.front();
  }
733 "Reset the optimization state.");
738 auto PriorPathsSize = Paths.
size();
744 addSearches(Phi, PausedSearches, 0);
750 auto Dom = Paths.
begin();
751 for (
auto I = std::next(Dom),
E = Paths.
end();
I !=
E; ++
I)
752 if (!MSSA.
dominates(
I->Clobber, Dom->Clobber))
754 auto Last = Paths.
end() - 1;
756 std::iter_swap(Last, Dom);
762 "liveOnEntry wasn't treated as a clobber?");
764 const auto *
Target = getWalkTarget(Current);
767 assert(
all_of(TerminatedPaths, [&](
const TerminatedPath &
P) {
768 return MSSA.
dominates(P.Clobber, Target);
775 Target, PausedSearches, NewPaused, TerminatedPaths)) {
779 auto Iter =
find_if(def_path(Blocker->LastNode), [&](
const DefPath &N) {
780 return defPathIndex(N) < PriorPathsSize;
782 assert(Iter != def_path_iterator());
784 DefPath &CurNode = *Iter;
785 assert(CurNode.Last == Current);
812 TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
819 if (NewPaused.
empty()) {
820 MoveDominatedPathToEnd(TerminatedPaths);
822 return {Result, std::move(TerminatedPaths)};
827 for (ListIndex Paused : NewPaused) {
828 UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
829 if (WR.IsKnownClobber)
833 DefChainEnd = WR.Result;
836 if (!TerminatedPaths.
empty()) {
840 for (
auto *MA :
def_chain(const_cast<MemoryAccess *>(Target)))
846 for (
const TerminatedPath &TP : TerminatedPaths) {
849 if (DT.
dominates(ChainBB, TP.Clobber->getBlock()))
856 if (!Clobbers.
empty()) {
857 MoveDominatedPathToEnd(Clobbers);
859 return {Result, std::move(Clobbers)};
863 [&](ListIndex
I) {
return Paths[
I].Last == DefChainEnd; }));
866 auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);
868 PriorPathsSize = Paths.
size();
869 PausedSearches.
clear();
870 for (ListIndex I : NewPaused)
871 addSearches(DefChainPhi, PausedSearches, I);
874 Current = DefChainPhi;
  void verifyOptResult(const OptznResult &R) const {
    assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
      return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
    }));
  }

  void resetPhiOptznState() {
    Paths.clear();
    VisitedPhis.clear();
  }

public:
  ClobberWalker(const MemorySSA &MSSA, AliasAnalysis &AA, DominatorTree &DT)
      : MSSA(MSSA), AA(AA), DT(DT) {}
  /// Finds the nearest clobber for the given query, optimizing phis if
  /// possible.
  MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q) {
    Query = &Q;

    MemoryAccess *Current = Start;
    // This walker pretends uses don't exist. If we're handed one, silently
    // grab its def. (This has the nice side-effect of ensuring we never cache
    // uses.)
    if (auto *MU = dyn_cast<MemoryUse>(Start))
      Current = MU->getDefiningAccess();

    DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
    // Fast path for the overly-common case (no crazy phi optimization
    // necessary).
    UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
    MemoryAccess *Result;
    if (WalkResult.IsKnownClobber) {
      Result = WalkResult.Result;
      Q.AR = WalkResult.AR;
    } else {
      OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
                                          Current, Q.StartingLoc);
      verifyOptResult(OptRes);
      resetPhiOptznState();
      Result = OptRes.PrimaryClobber.Clobber;
    }

#ifdef EXPENSIVE_CHECKS
    if (!Q.SkipSelfAccess)
      checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
#endif
    return Result;
  }
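A hedged usage sketch of the walker built on findClobber: clients ask MemorySSA for its walker and query clobbers per instruction (loadIsUnclobbered is an invented helper):

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static bool loadIsUnclobbered(MemorySSA &MSSA, LoadInst &LI) {
  MemorySSAWalker *W = MSSA.getWalker();
  MemoryAccess *Clobber = W->getClobberingMemoryAccess(&LI);
  // liveOnEntry means nothing in this function clobbers the load's location.
  return MSSA.isLiveOnEntryDef(Clobber);
}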
struct RenamePassData {
  DomTreeNode *DTN;
  DomTreeNode::const_iterator ChildIt;
  MemoryAccess *IncomingVal;

  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
                 MemoryAccess *M)
      : DTN(D), ChildIt(It), IncomingVal(M) {}

  void swap(RenamePassData &RHS) {
    std::swap(DTN, RHS.DTN);
    std::swap(ChildIt, RHS.ChildIt);
    std::swap(IncomingVal, RHS.IncomingVal);
  }
};

// ...

class MemorySSA::ClobberWalkerBase {
  ClobberWalker Walker;
  MemorySSA *MSSA;

public:
  ClobberWalkerBase(MemorySSA *M, AliasAnalysis *A, DominatorTree *D)
      : Walker(*M, *A, *D), MSSA(M) {}
  // ... (getClobberingMemoryAccessBase overloads are defined further below)
};
// ... (MemorySSA::CachingWalker)
  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }

  void verify(const MemorySSA *MSSA) override {
    MemorySSAWalker::verify(MSSA);
    Walker->verify(MSSA);
  }

// ... (MemorySSA::SkipSelfWalker carries identical invalidateInfo/verify
// overrides)
void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
                                    bool RenameAllUses) {
  // Pass through values to our successors.
  for (const BasicBlock *S : successors(BB)) {
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block.
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    if (RenameAllUses) {
      int PhiIndex = Phi->getBasicBlockIndex(BB);
      assert(PhiIndex != -1 && "Incomplete phi during partial rename");
      Phi->setIncomingValue(PhiIndex, IncomingVal);
    } else
      Phi->addIncoming(IncomingVal, BB);
  }
}
/// Rename a single basic block into MemorySSA form.
/// Uses the standard SSA renaming algorithm.
/// \returns The new incoming value.
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
                                     bool RenameAllUses) {
  auto It = PerBlockAccesses.find(BB);
  // Skip most processing if the list is empty.
  if (It != PerBlockAccesses.end()) {
    AccessList *Accesses = It->second.get();
    for (MemoryAccess &L : *Accesses) {
      if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
        if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
          MUD->setDefiningAccess(IncomingVal);
        if (isa<MemoryDef>(&L))
          IncomingVal = &L;
      } else {
        IncomingVal = &L;
      }
    }
  }
  return IncomingVal;
}
/// This is the standard SSA renaming algorithm, made non-recursive.
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSetImpl<BasicBlock *> &Visited,
                           bool SkipVisited, bool RenameAllUses) {
  SmallVector<RenamePassData, 32> WorkStack;
  // Skip everything if we already renamed this block and we are skipping.
  // Note: You can't sink this into the if, because we need it to occur
  // regardless of whether we skip blocks or not.
  bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
  if (SkipVisited && AlreadyVisited)
    return;

  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
  WorkStack.push_back({Root, Root->begin(), IncomingVal});

  while (!WorkStack.empty()) {
    DomTreeNode *Node = WorkStack.back().DTN;
    DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
    IncomingVal = WorkStack.back().IncomingVal;

    if (ChildIt == Node->end()) {
      WorkStack.pop_back();
    } else {
      DomTreeNode *Child = *ChildIt;
      ++WorkStack.back().ChildIt;
      BasicBlock *BB = Child->getBlock();
      // Note: You can't sink this into the if either.
      AlreadyVisited = !Visited.insert(BB).second;
      if (SkipVisited && AlreadyVisited) {
        // We already visited this during our renaming, which can happen when
        // being asked to rename multiple blocks. The incoming value can only
        // change if there is a block def, and in that case it's the last block
        // def in the list.
        if (auto *BlockDefs = getWritableBlockDefs(BB))
          IncomingVal = &*BlockDefs->rbegin();
      } else
        IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
      renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
      WorkStack.push_back({Child, Child->begin(), IncomingVal});
    }
  }
}
/// This handles unreachable block accesses by deleting phi nodes in
/// unreachable blocks, and marking all other unreachable MemoryAccess's as
/// being uses of the live on entry definition.
void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
  assert(!DT->isReachableFromEntry(BB) &&
         "Reachable block found while handling unreachable blocks");

  // Make sure phi nodes in our reachable successors end up with a
  // LiveOnEntryDef for our incoming edge, even though our block is forward
  // unreachable.
  for (const BasicBlock *S : successors(BB)) {
    if (!DT->isReachableFromEntry(S))
      continue;
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block.
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    Phi->addIncoming(LiveOnEntryDef.get(), BB);
  }

  auto It = PerBlockAccesses.find(BB);
  if (It == PerBlockAccesses.end())
    return;

  auto &Accesses = It->second;
  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
    auto Next = std::next(AI);
    // If we have a phi, just remove it. We are going to replace all uses of
    // memory accesses with live on entry, and the phi is equivalent to that.
    if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
      UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
    else
      Accesses->erase(AI);
    AI = Next;
  }
}
MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : AA(AA), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr), NextID(0) {
  buildMemorySSA();
}

MemorySSA::~MemorySSA() {
  // Drop all our references.
  for (const auto &Pair : PerBlockAccesses)
    for (MemoryAccess &MA : *Pair.second)
      MA.dropAllReferences();
}

MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = llvm::make_unique<AccessList>();
  return Res.first->second.get();
}

MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
  auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = llvm::make_unique<DefsList>();
  return Res.first->second.get();
}
/// This class is a batch walker of all MemoryUse's in the program, and points
/// their defining access at the thing that actually clobbers them.
class MemorySSA::OptimizeUses {
public:
  OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker, AliasAnalysis *AA,
               DominatorTree *DT)
      : MSSA(MSSA), Walker(Walker), AA(AA), DT(DT) {}

  void optimizeUses();

private:
  /// This represents where a given memory location is in the stack.
  struct MemlocStackInfo {
    // This essentially is keeping track of versions of the stack. Whenever
    // the stack changes due to pushes or pops, these versions increase.
    unsigned long StackEpoch;
    unsigned long PopEpoch;
    // This is the lower bound of places on the stack to check. It is equal to
    // the place the last stack walk ended.
    unsigned long LowerBound;
    const BasicBlock *LowerBoundBlock;
    // This is where the last walk for this memory location ended.
    unsigned long LastKill;
    bool LastKillValid;
    Optional<AliasResult> AR;
  };

  void optimizeUsesInBlock(const BasicBlock *, unsigned long &,
                           unsigned long &, SmallVectorImpl<MemoryAccess *> &,
                           DenseMap<MemoryLocOrCall, MemlocStackInfo> &);

  MemorySSA *MSSA;
  MemorySSAWalker *Walker;
  AliasAnalysis *AA;
  DominatorTree *DT;
};
/// Optimize uses to point to their actual clobbering definitions.
void MemorySSA::OptimizeUses::optimizeUsesInBlock(
    const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
    SmallVectorImpl<MemoryAccess *> &VersionStack,
    DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {
  // If no accesses, nothing to do.
  MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
  if (Accesses == nullptr)
    return;

  // Pop everything that doesn't dominate the current block off the stack,
  // incrementing the PopEpoch to account for this.
  while (true) {
    assert(
        !VersionStack.empty() &&
        "Version stack should have liveOnEntry sentinel dominating everything");
    BasicBlock *BackBlock = VersionStack.back()->getBlock();
    if (DT->dominates(BackBlock, BB))
      break;
    while (VersionStack.back()->getBlock() == BackBlock)
      VersionStack.pop_back();
    ++PopEpoch;
  }

  for (MemoryAccess &MA : *Accesses) {
    auto *MU = dyn_cast<MemoryUse>(&MA);
    if (!MU) {
      VersionStack.push_back(&MA);
      ++StackEpoch;
      continue;
    }

    Instruction *UseInst = MU->getMemoryInst();
    if (isUseTriviallyOptimizableToLiveOnEntry(*AA, UseInst)) {
      MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
      continue;
    }

    MemoryLocOrCall UseMLOC(MU);
    auto &LocInfo = LocStackInfo[UseMLOC];
    // If the pop epoch changed, it means we've removed stuff from the top of
    // the stack and need to compute the top of stack again.
    if (LocInfo.PopEpoch != PopEpoch) {
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
      // If the lower bound was in something that no longer dominates us, we
      // have to reset it.
      if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
          !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
        // Reset the lower bound of things to check.
        LocInfo.LowerBound = 0;
        LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
        LocInfo.LastKillValid = false;
      }
    } else if (LocInfo.StackEpoch != StackEpoch) {
      // If all that has changed is the StackEpoch, we only have to check the
      // new things on the stack, because we've checked everything before.
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
    }
    if (!LocInfo.LastKillValid) {
      LocInfo.LastKill = VersionStack.size() - 1;
      LocInfo.LastKillValid = true;
      LocInfo.AR = MayAlias;
    }

    assert(LocInfo.LowerBound < VersionStack.size() &&
           "Lower bound out of range");
    assert(LocInfo.LastKill < VersionStack.size() &&
           "Last kill info out of range");
    // In any case, the new upper bound is the top of the stack.
    unsigned long UpperBound = VersionStack.size() - 1;

    if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
      LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
                        << *UseInst << ")"
                        << " because there are "
                        << UpperBound - LocInfo.LowerBound
                        << " stores to disambiguate\n");
      // Because we did not walk, LastKill is no longer valid, as this may
      // have been a kill.
      LocInfo.LastKillValid = false;
      continue;
    }

    bool FoundClobberResult = false;
    while (UpperBound > LocInfo.LowerBound) {
      if (isa<MemoryPhi>(VersionStack[UpperBound])) {
        // For phis, use the walker, because it walks over phis.
        MemoryAccess *Result = Walker->getClobberingMemoryAccess(UseInst);
        // We are guaranteed to find it or something is wrong.
        while (VersionStack[UpperBound] != Result) {
          assert(UpperBound != 0);
          --UpperBound;
        }
        FoundClobberResult = true;
        break;
      }

      MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
      // If the lifetime of the pointer ends at this instruction, it's live on
      // entry.
      if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) {
        // Reset UpperBound to liveOnEntryDef's place in the stack.
        UpperBound = 0;
        FoundClobberResult = true;
        LocInfo.AR = MustAlias;
        break;
      }
      ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
      if (CA.IsClobber) {
        FoundClobberResult = true;
        LocInfo.AR = CA.AR;
        break;
      }
      --UpperBound;
    }

    // At the end of this loop, UpperBound is either a clobber or the lower
    // bound. Phi walking may have pushed it below LastKill; only respect it
    // when we actually found something.
    if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
      // We were last killed now by where we got to.
      if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
        LocInfo.AR = None;
      MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
      LocInfo.LastKill = UpperBound;
    } else {
      // Otherwise, we checked all the new ones, and now we know we can get to
      // LastKill.
      MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
    }
    LocInfo.LowerBound = VersionStack.size() - 1;
    LocInfo.LowerBoundBlock = BB;
  }
}
/// Optimize uses to point to their actual clobbering definitions.
void MemorySSA::OptimizeUses::optimizeUses() {
  SmallVector<MemoryAccess *, 16> VersionStack;
  DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
  VersionStack.push_back(MSSA->getLiveOnEntryDef());

  unsigned long StackEpoch = 1;
  unsigned long PopEpoch = 1;
  // We perform a non-recursive top-down dominator tree walk.
  for (const auto *DomNode : depth_first(DT->getRootNode()))
    optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
                        LocStackInfo);
}
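The driver relies on depth_first over the dominator tree, which visits each block before any block it dominates, exactly the order the version stack needs. A minimal self-contained sketch of that traversal (countDomTreeBlocks is an invented helper):

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/IR/Dominators.h"
using namespace llvm;

static unsigned countDomTreeBlocks(DominatorTree &DT) {
  unsigned N = 0;
  // Preorder walk: a node is always visited before its dom-tree children.
  for (const auto *DomNode : depth_first(DT.getRootNode()))
    if (DomNode->getBlock())
      ++N;
  return N;
}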
void MemorySSA::placePHINodes(
    const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
  // Determine where our MemoryPhi's should go.
  ForwardIDFCalculator IDFs(*DT);
  IDFs.setDefiningBlocks(DefiningBlocks);
  SmallVector<BasicBlock *, 32> IDFBlocks;
  IDFs.calculate(IDFBlocks);

  // Now place MemoryPhi nodes.
  for (auto &BB : IDFBlocks)
    createMemoryPhi(BB);
}
void MemorySSA::buildMemorySSA() {
  // We create an access to represent "live on entry", for things like
  // arguments or users of globals, where the memory they use is defined before
  // the beginning of the function. We do not actually insert it into the IR.
  BasicBlock &StartingPoint = F.getEntryBlock();
  LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
                                     &StartingPoint, NextID++));

  // Go through each block, figure out where defs occur, and chain together
  // all the accesses.
  SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
  for (BasicBlock &B : F) {
    bool InsertIntoDef = false;
    AccessList *Accesses = nullptr;
    DefsList *Defs = nullptr;
    for (Instruction &I : B) {
      MemoryUseOrDef *MUD = createNewAccess(&I);
      if (!MUD)
        continue;

      if (!Accesses)
        Accesses = getOrCreateAccessList(&B);
      Accesses->push_back(MUD);
      if (isa<MemoryDef>(MUD)) {
        InsertIntoDef = true;
        if (!Defs)
          Defs = getOrCreateDefsList(&B);
        Defs->push_back(*MUD);
      }
    }
    if (InsertIntoDef)
      DefiningBlocks.insert(&B);
  }
  placePHINodes(DefiningBlocks);

  // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
  // filled in with all blocks.
  SmallPtrSet<BasicBlock *, 16> Visited;
  renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);

  // Optimize MemoryUses to point at their nearest actual clobber.
  OptimizeUses(this, getWalkerImpl(), AA, DT).optimizeUses();

  // Mark the uses in unreachable blocks as live on entry, so that they go
  // somewhere.
  for (auto &BB : F)
    if (!Visited.count(&BB))
      markUnreachableAsLiveOnEntry(&BB);
}
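How a legacy pass would consume the analysis built above: a hedged sketch mirroring the wrapper pass this file registers (MSSADumpPass is an invented pass name; this is not part of the file):

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Pass.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

namespace {
struct MSSADumpPass : FunctionPass {
  static char ID;
  MSSADumpPass() : FunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    AU.addRequiredTransitive<MemorySSAWrapperPass>();
  }

  bool runOnFunction(Function &F) override {
    MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
    MSSA.print(errs()); // IR annotated with MemoryUse/MemoryDef/MemoryPhi
    return false;       // analysis-only: the IR is unchanged
  }
};
} // namespace
char MSSADumpPass::ID = 0;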
MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }

MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() {
  if (Walker)
    return Walker.get();

  if (!WalkerBase)
    WalkerBase = llvm::make_unique<ClobberWalkerBase>(this, AA, DT);

  Walker = llvm::make_unique<CachingWalker>(this, WalkerBase.get());
  return Walker.get();
}

MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
  if (SkipWalker)
    return SkipWalker.get();

  if (!WalkerBase)
    WalkerBase = llvm::make_unique<ClobberWalkerBase>(this, AA, DT);

  SkipWalker = llvm::make_unique<SkipSelfWalker>(this, WalkerBase.get());
  return SkipWalker.get();
}
// This is a helper function used by the creation routines. It places NewAccess
// into the access and defs lists for a given basic block, at the given
// insertion point.
void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
                                        const BasicBlock *BB,
                                        InsertionPlace Point) {
  auto *Accesses = getOrCreateAccessList(BB);
  if (Point == Beginning) {
    // If it's a phi node, it goes first; otherwise, it goes after any phi
    // nodes.
    if (isa<MemoryPhi>(NewAccess)) {
      Accesses->push_front(NewAccess);
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_front(*NewAccess);
    } else {
      auto AI = find_if_not(
          *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
      Accesses->insert(AI, NewAccess);
      if (!isa<MemoryUse>(NewAccess)) {
        auto *Defs = getOrCreateDefsList(BB);
        auto DI = find_if_not(
            *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
        Defs->insert(DI, *NewAccess);
      }
    }
  } else {
    Accesses->push_back(NewAccess);
    if (!isa<MemoryUse>(NewAccess)) {
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_back(*NewAccess);
    }
  }
  BlockNumberingValid.erase(BB);
}
void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
                                      AccessList::iterator InsertPt) {
  auto *Accesses = getWritableBlockAccesses(BB);
  bool WasEnd = InsertPt == Accesses->end();
  Accesses->insert(AccessList::iterator(InsertPt), What);
  if (!isa<MemoryUse>(What)) {
    auto *Defs = getOrCreateDefsList(BB);
    // If we got asked to insert at the end, we have an easy job: just shove it
    // at the end. If we got asked to insert before an existing def, we also
    // have an iterator. If we got asked to insert before a use, we have to
    // hunt for the next def.
    if (WasEnd) {
      Defs->push_back(*What);
    } else if (isa<MemoryDef>(InsertPt)) {
      Defs->insert(InsertPt->getDefsIterator(), *What);
    } else {
      while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
        ++InsertPt;
      // Either we found a def, or we are inserting at the end.
      if (InsertPt == Accesses->end())
        Defs->push_back(*What);
      else
        Defs->insert(InsertPt->getDefsIterator(), *What);
    }
  }
  BlockNumberingValid.erase(BB);
}
void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
  // Keep it in the lookup tables, remove from the lists.
  removeFromLists(What, false);

  // Moving implicitly invalidates the optimized state of a MemoryUse (and
  // Phis can't be optimized), but not that of a MemoryDef.
  if (auto *MD = dyn_cast<MemoryDef>(What))
    MD->resetOptimized();
  What->setBlock(BB);
}

// Move What before Where in the IR. The end result is that What will belong to
// the right lists and have the right Block set, but will not otherwise be
// correct.
void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                       AccessList::iterator Where) {
  prepareForMoveTo(What, BB);
  insertIntoListsBefore(What, BB, Where);
}

void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
                       InsertionPlace Point) {
  if (isa<MemoryPhi>(What)) {
    assert(Point == Beginning &&
           "Can only move a Phi at the beginning of the block");
    // Update lookup table entry.
    ValueToMemoryAccess.erase(What->getBlock());
    bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
    (void)Inserted;
    assert(Inserted && "Cannot move a Phi to a block that already has one");
  }

  prepareForMoveTo(What, BB);
  insertIntoListsForBlock(What, BB, Point);
}

MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
  assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
  MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
  // Phi's always are placed at the front of the block.
  insertIntoListsForBlock(Phi, BB, Beginning);
  ValueToMemoryAccess[BB] = Phi;
  return Phi;
}
MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
                                               MemoryAccess *Definition,
                                               const MemoryUseOrDef *Template) {
  assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
  MemoryUseOrDef *NewAccess = createNewAccess(I, Template);
  assert(
      NewAccess != nullptr &&
      "Tried to create a memory access for a non-memory touching instruction");
  NewAccess->setDefiningAccess(Definition);
  return NewAccess;
}

// Return true if the instruction has ordering constraints aside from its
// memory operands, i.e. is not a simple unordered load or store.
static inline bool isOrdered(const Instruction *I) {
  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (!SI->isUnordered())
      return true;
  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isUnordered())
      return true;
  }
  return false;
}

/// Helper function to create new memory accesses.
MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
                                           const MemoryUseOrDef *Template) {
  // ...
  bool Def, Use;
  if (Template) {
    Def = dyn_cast_or_null<MemoryDef>(Template) != nullptr;
    Use = dyn_cast_or_null<MemoryUse>(Template) != nullptr;
#if !defined(NDEBUG)
    ModRefInfo ModRef = AA->getModRefInfo(I, None);
    bool DefCheck, UseCheck;
    DefCheck = isModSet(ModRef) || isOrdered(I);
    UseCheck = isRefSet(ModRef);
    assert(Def == DefCheck && (Def || Use == UseCheck) && "Invalid template");
#endif
  } else {
    // Find out what affect this instruction has on memory.
    // ... (no template: compute Def/Use from ModRefInfo, mirroring the
    //      NDEBUG checks above)
  }

  // It's possible for an instruction to not modify memory at all.
  if (!Def && !Use)
    return nullptr;

  MemoryUseOrDef *MUD;
  if (Def)
    MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
  else
    MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
  ValueToMemoryAccess[I] = MUD;
  return MUD;
}
/// Returns true if \p Replacer dominates \p Replacee.
bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
                             const MemoryAccess *Replacee) const {
  if (isa<MemoryUseOrDef>(Replacee))
    return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
  const auto *MP = cast<MemoryPhi>(Replacee);
  // For a phi node, the use occurs in the predecessor block of the phi node.
  // Since we may occur multiple times in the phi node, we have to check each
  // operand to ensure Replacer dominates each operand where Replacee occurs.
  for (const Use &Arg : MP->operands()) {
    if (Arg.get() != Replacee &&
        !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg)))
      return false;
  }
  return true;
}
1715 "Trying to remove memory access that still has uses");
1716 BlockNumbering.erase(MA);
1717 if (
auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1718 MUD->setDefiningAccess(
nullptr);
1720 if (!isa<MemoryUse>(MA))
1721 Walker->invalidateInfo(MA);
1724 if (
const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1725 MemoryInst = MUD->getMemoryInst();
1729 auto VMA = ValueToMemoryAccess.find(MemoryInst);
1730 if (VMA->second == MA)
1731 ValueToMemoryAccess.erase(VMA);
/// Properly remove \p MA from all of MemorySSA's lists.
///
/// Because of the way the intrusive list and use lists work, it is important
/// to do removal in the right order. ShouldDelete defaults to true, and will
/// cause the memory access to also be deleted, not just removed.
void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
  BasicBlock *BB = MA->getBlock();
  // The access list owns the reference, so we erase it from the non-owning
  // list first.
  if (!isa<MemoryUse>(MA)) {
    auto DefsIt = PerBlockDefs.find(BB);
    std::unique_ptr<DefsList> &Defs = DefsIt->second;
    Defs->remove(*MA);
    if (Defs->empty())
      PerBlockDefs.erase(DefsIt);
  }

  // The erase call here will delete it. If we don't want it deleted, we call
  // remove instead.
  auto AccessIt = PerBlockAccesses.find(BB);
  std::unique_ptr<AccessList> &Accesses = AccessIt->second;
  if (ShouldDelete)
    Accesses->erase(MA);
  else
    Accesses->remove(MA);

  if (Accesses->empty()) {
    PerBlockAccesses.erase(AccessIt);
    BlockNumberingValid.erase(BB);
  }
}
void MemorySSA::print(raw_ostream &OS) const {
  MemorySSAAnnotatedWriter Writer(this);
  F.print(OS, &Writer);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
#endif

void MemorySSA::verifyMemorySSA() const {
  verifyDefUses(F);
  verifyDomination(F);
  verifyOrdering(F);
  verifyDominationNumbers(F);
  Walker->verify(this);
  verifyClobberSanity(F);
}
/// Check clobber sanity for a single optimized access \p MA.
void MemorySSA::checkClobberSanityAccess(const MemoryAccess *MA) const {
  if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
    if (!MUD->isOptimized())
      return;
    auto *I = MUD->getMemoryInst();
    auto Loc = MemoryLocation::getOrNone(I);
    if (Loc == None)
      return;
    auto *Clobber = MUD->getOptimized();
    UpwardsMemoryQuery Q(I, MUD);
    checkClobberSanity(MUD, Clobber, *Loc, *this, Q, *AA,
                       /*AllowImpreciseClobber=*/true);
  }
}
void MemorySSA::verifyClobberSanity(const Function &F) const {
#if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)
  for (const BasicBlock &BB : F) {
    const AccessList *Accesses = getBlockAccesses(&BB);
    if (!Accesses)
      continue;
    for (const MemoryAccess &MA : *Accesses)
      checkClobberSanityAccess(&MA);
  }
#endif
}

/// Verify that all of the blocks we believe to have valid domination numbers
/// actually have valid domination numbers.
void MemorySSA::verifyDominationNumbers(const Function &F) const {
#ifndef NDEBUG
  if (BlockNumberingValid.empty())
    return;

  SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
  for (const BasicBlock &BB : F) {
    if (!ValidBlocks.count(&BB))
      continue;

    ValidBlocks.erase(&BB);

    const AccessList *Accesses = getBlockAccesses(&BB);
    // It's correct to say an empty block has valid numbering.
    if (!Accesses)
      continue;

    // Block numbering starts at 1.
    unsigned long LastNumber = 0;
    for (const MemoryAccess &MA : *Accesses) {
      auto ThisNumberIter = BlockNumbering.find(&MA);
      assert(ThisNumberIter != BlockNumbering.end() &&
             "MemoryAccess has no domination number in a valid block!");

      unsigned long ThisNumber = ThisNumberIter->second;
      assert(ThisNumber > LastNumber &&
             "Domination numbers should be strictly increasing!");
      LastNumber = ThisNumber;
    }
  }

  assert(ValidBlocks.empty() &&
         "All valid BasicBlocks should exist in F -- dangling pointers?");
#endif
}
/// Verify that the order and existence of MemoryAccesses matches the order
/// and existence of memory-affecting instructions.
void MemorySSA::verifyOrdering(Function &F) const {
#ifndef NDEBUG
  SmallVector<MemoryAccess *, 32> ActualAccesses;
  SmallVector<MemoryAccess *, 32> ActualDefs;
  for (BasicBlock &B : F) {
    const AccessList *AL = getBlockAccesses(&B);
    const auto *DL = getBlockDefs(&B);
    // ... (collect the block's MemoryPhi, if any, into both vectors)
    for (Instruction &I : B) {
      MemoryAccess *MA = getMemoryAccess(&I);
      assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
             "We have memory affecting instructions "
             "in this block but they are not in the "
             "access list or defs list");
      if (MA) {
        ActualAccesses.push_back(MA);
        if (isa<MemoryDef>(MA))
          ActualDefs.push_back(MA);
      }
    }
    // Either we hit the assert, really have no accesses, or we have both
    // accesses and an access list. Same with defs.
    if (!AL && !DL)
      continue;
    assert(AL->size() == ActualAccesses.size() &&
           "We don't have the same number of accesses in the block as on the "
           "access list");
    assert((DL || ActualDefs.empty()) &&
           "Either we should have a defs list, or we should have no defs");
    assert((!DL || DL->size() == ActualDefs.size()) &&
           "We don't have the same number of defs in the block as on the "
           "def list");
    auto ALI = AL->begin();
    auto AAI = ActualAccesses.begin();
    while (ALI != AL->end() && AAI != ActualAccesses.end()) {
      assert(&*ALI == *AAI && "Not the same accesses in the same order");
      ++ALI;
      ++AAI;
    }
    ActualAccesses.clear();
    if (DL) {
      auto DLI = DL->begin();
      auto ADI = ActualDefs.begin();
      while (DLI != DL->end() && ADI != ActualDefs.end()) {
        assert(&*DLI == *ADI && "Not the same defs in the same order");
        ++DLI;
        ++ADI;
      }
    }
    ActualDefs.clear();
  }
#endif
}
/// Verify the domination properties of MemorySSA by checking that each
/// definition dominates all of its uses.
void MemorySSA::verifyDomination(Function &F) const {
#ifndef NDEBUG
  for (BasicBlock &B : F) {
    // Phi nodes are attached to basic blocks.
    if (MemoryPhi *MP = getMemoryAccess(&B))
      for (const Use &U : MP->uses())
        assert(dominates(MP, U) && "Memory PHI does not dominate it's uses");

    for (Instruction &I : B) {
      MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I));
      if (!MD)
        continue;

      for (const Use &U : MD->uses())
        assert(dominates(MD, U) && "Memory Def does not dominate it's uses");
    }
  }
#endif
}

/// Verify the def-use lists in MemorySSA, by checking that \p Use appears in
/// the use list of \p Def.
void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
#ifndef NDEBUG
  // The live on entry use may cause us to get a NULL def here.
  if (!Def)
    assert(isLiveOnEntryDef(Use) &&
           "Null def but use not point to live on entry def");
  else
    assert(is_contained(Def->users(), Use) &&
           "Did not find use in def's use list");
#endif
}
/// Verify the immediate use information, by walking all the memory accesses
/// and verifying that, for each use, it appears in the appropriate def's use
/// list.
void MemorySSA::verifyDefUses(Function &F) const {
#ifndef NDEBUG
  for (BasicBlock &B : F) {
    // Phi nodes are attached to basic blocks.
    if (MemoryPhi *Phi = getMemoryAccess(&B)) {
      assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
                                          pred_begin(&B), pred_end(&B))) &&
             "Incomplete MemoryPhi Node");
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
        verifyUseInDefs(Phi->getIncomingValue(I), Phi);
        assert(is_contained(predecessors(&B), Phi->getIncomingBlock(I)) &&
               "Incoming phi block not a block predecessor");
      }
    }

    for (Instruction &I : B) {
      if (MemoryUseOrDef *MA = getMemoryAccess(&I))
        verifyUseInDefs(MA->getDefiningAccess(), MA);
    }
  }
#endif
}
/// Perform a local numbering on blocks so that instruction ordering can be
/// determined in constant time.
void MemorySSA::renumberBlock(const BasicBlock *B) const {
  // The pre-increment ensures the numbers really start at 1.
  unsigned long CurrentNumber = 0;
  const AccessList *AL = getBlockAccesses(B);
  assert(AL != nullptr && "Asking to renumber an empty block");
  for (const auto &I : *AL)
    BlockNumbering[&I] = ++CurrentNumber;
  BlockNumberingValid.insert(B);
}
2003 "Asking for local domination when accesses are in different blocks!");
2005 if (Dominatee == Dominator)
2018 if (!BlockNumberingValid.count(DominatorBlock))
2019 renumberBlock(DominatorBlock);
2021 unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
2023 assert(DominatorNum != 0 &&
"Block was not numbered properly");
2024 unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
2025 assert(DominateeNum != 0 &&
"Block was not numbered properly");
2026 return DominatorNum < DominateeNum;
bool MemorySSA::dominates(const MemoryAccess *Dominator,
                          const MemoryAccess *Dominatee) const {
  if (Dominator == Dominatee)
    return true;

  if (isLiveOnEntryDef(Dominatee))
    return false;

  if (Dominator->getBlock() != Dominatee->getBlock())
    return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
  return locallyDominates(Dominator, Dominatee);
}

bool MemorySSA::dominates(const MemoryAccess *Dominator,
                          const Use &Dominatee) const {
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
    BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
    // The def must dominate the incoming block of the phi.
    if (UseBB != Dominator->getBlock())
      return DT->dominates(Dominator->getBlock(), UseBB);
    // If the UseBB and the DefBB are the same, compare locally.
    return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
  }

  // If it's not a PHI node use, the normal dominates can already handle it.
  return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
}
const static char LiveOnEntryStr[] = "liveOnEntry";

void MemoryAccess::print(raw_ostream &OS) const {
  switch (getValueID()) {
  case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
  case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
  case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
  }
  llvm_unreachable("invalid value id");
}

void MemoryDef::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();

  auto printID = [&OS](MemoryAccess *A) {
    if (A && A->getID())
      OS << A->getID();
    else
      OS << LiveOnEntryStr;
  };

  OS << getID() << " = MemoryDef(";
  printID(UO);
  OS << ")";

  if (isOptimized()) {
    OS << "->";
    printID(getOptimized());
    // ... (print the optimized access type, if known)
  }
}

void MemoryPhi::print(raw_ostream &OS) const {
  bool First = true;
  OS << getID() << " = MemoryPhi(";
  for (const auto &Op : operands()) {
    BasicBlock *BB = getIncomingBlock(Op);
    MemoryAccess *MA = cast<MemoryAccess>(Op);
    if (!First)
      OS << ',';
    else
      First = false;

    OS << '{';
    if (BB->hasName())
      OS << BB->getName();
    else
      BB->printAsOperand(OS, false);
    OS << ',';
    if (unsigned ID = MA->getID())
      OS << ID;
    else
      OS << LiveOnEntryStr;
    OS << '}';
  }
  OS << ')';
}

void MemoryUse::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();
  OS << "MemoryUse(";
  if (UO && UO->getID())
    OS << UO->getID();
  else
    OS << LiveOnEntryStr;
  OS << ')';
  // ... (print the optimized access type, if known)
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MemoryAccess::dump() const {
  print(dbgs());
  dbgs() << "\n";
}
#endif

// ...

bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
  auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
  MSSA.print(dbgs());
  if (VerifyMemorySSA)
    MSSA.verifyMemorySSA();
  return false;
}
PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
                                            FunctionAnalysisManager &AM) {
  OS << "MemorySSA for function: " << F.getName() << "\n";
  AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS);

  return PreservedAnalyses::all();
}
bool MemorySSAWrapperPass::runOnFunction(Function &F) {
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  MSSA.reset(new MemorySSA(F, &AA, &DT));
  return false;
}
MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
    MemoryAccess *StartingAccess, const MemoryLocation &Loc) {
  if (isa<MemoryPhi>(StartingAccess))
    return StartingAccess;

  auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
  if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
    return StartingUseOrDef;

  Instruction *I = StartingUseOrDef->getMemoryInst();

  // Conservatively, fences are always clobbers, so don't perform the walk if
  // we hit a fence.
  if (!isa<CallBase>(I) && I->isFenceLike())
    return StartingUseOrDef;

  UpwardsMemoryQuery Q;
  Q.OriginalAccess = StartingUseOrDef;
  Q.StartingLoc = Loc;
  Q.Inst = I;
  Q.IsCall = false;

  // Unlike the other function, do not walk to the def of a def, because we
  // are handed something we already believe is the clobbering access.
  MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
                                     ? StartingUseOrDef->getDefiningAccess()
                                     : StartingUseOrDef;

  MemoryAccess *Clobber = Walker.findClobber(DefiningAccess, Q);
  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *DefiningAccess << "\n");
  LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *Clobber << "\n");
  return Clobber;
}
MemoryAccess *
MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(MemoryAccess *MA,
                                                            bool SkipSelf) {
  auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
  // If this is a MemoryPhi, we can't do anything.
  if (!StartingAccess)
    return MA;

  bool IsOptimized = false;

  // If this is an already optimized use or def, return the optimized result.
  // Note: Currently, we store the optimized def result in a separate field,
  // since we can't use the defining access.
  if (StartingAccess->isOptimized()) {
    if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
      return StartingAccess->getOptimized();
    IsOptimized = true;
  }

  const Instruction *I = StartingAccess->getMemoryInst();
  // We can't sanely do anything with a fence: it conservatively clobbers all
  // memory and has no location to disambiguate against.
  if (!isa<CallBase>(I) && I->isFenceLike())
    return StartingAccess;

  UpwardsMemoryQuery Q(I, StartingAccess);

  if (isUseTriviallyOptimizableToLiveOnEntry(*MSSA->AA, I)) {
    MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
    StartingAccess->setOptimized(LiveOnEntry);
    StartingAccess->setOptimizedAccessType(None);
    return LiveOnEntry;
  }

  MemoryAccess *OptimizedAccess;
  if (!IsOptimized) {
    // Start with the thing we already think clobbers this location.
    MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();

    // At this point, DefiningAccess may be the live on entry def. If it is, we
    // will not get a better result.
    if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
      StartingAccess->setOptimized(DefiningAccess);
      StartingAccess->setOptimizedAccessType(None);
      return DefiningAccess;
    }

    OptimizedAccess = Walker.findClobber(DefiningAccess, Q);
    StartingAccess->setOptimized(OptimizedAccess);
    if (MSSA->isLiveOnEntryDef(OptimizedAccess))
      StartingAccess->setOptimizedAccessType(None);
    else if (Q.AR == MustAlias)
      StartingAccess->setOptimizedAccessType(MustAlias);
  } else
    OptimizedAccess = StartingAccess->getOptimized();

  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
  LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");

  MemoryAccess *Result;
  if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
      isa<MemoryDef>(StartingAccess)) {
    assert(isa<MemoryDef>(Q.OriginalAccess));
    Q.SkipSelfAccess = true;
    Result = Walker.findClobber(OptimizedAccess, Q);
  } else
    Result = OptimizedAccess;

  LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
  LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");

  return Result;
}
MemoryAccess *
MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
  return Walker->getClobberingMemoryAccessBase(MA, false);
}

MemoryAccess *
MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA,
                                                    const MemoryLocation &Loc) {
  return Walker->getClobberingMemoryAccessBase(MA, Loc);
}

MemoryAccess *
MemorySSA::SkipSelfWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
  return Walker->getClobberingMemoryAccessBase(MA, true);
}

MemoryAccess *
MemorySSA::SkipSelfWalker::getClobberingMemoryAccess(MemoryAccess *MA,
                                                     const MemoryLocation &Loc) {
  return Walker->getClobberingMemoryAccessBase(MA, Loc);
}
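The skip-self variant matters when asking what clobbers a MemoryDef other than the def itself; a hedged sketch (clobberPastSelf is an invented helper):

#include "llvm/Analysis/MemorySSA.h"
using namespace llvm;

// For a MemoryDef, look past the def's own location to the next clobber above.
static MemoryAccess *clobberPastSelf(MemorySSA &MSSA, MemoryDef *MD) {
  return MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(MD);
}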
MemoryAccess *
DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
    return Use->getDefiningAccess();
  return MA;
}

MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, const MemoryLocation &) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
    return Use->getDefiningAccess();
  return StartingAccess;
}
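Finally, the new-pass-manager route, mirroring the MemorySSAAnalysis and MemorySSAPrinterPass entry points above (MSSAUserPass is an invented pass name, shown as a sketch only):

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/PassManager.h"
using namespace llvm;

struct MSSAUserPass : PassInfoMixin<MSSAUserPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
    MSSA.verifyMemorySSA(); // optional self-check; in-tree callers gate this
                            // behind the VerifyMemorySSA flag defined above
    return PreservedAnalyses::all();
  }
};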