//===-- MemorySSAUpdater.cpp - Memory SSA Updater--------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------===//
//
// This file implements the MemorySSAUpdater class.
//
//===----------------------------------------------------------------===//
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include <algorithm>

#define DEBUG_TYPE "memoryssa"
using namespace llvm;

// This is the marker algorithm from "Simple and Efficient Construction of
// Static Single Assignment Form".
// The simple, non-marker algorithm places phi nodes at any join.
// Here, we place markers, and only place phi nodes if they end up necessary.
// They are only necessary if they break a cycle (i.e., we recursively visit
// ourselves again), or we discover, while getting the value of the operands,
// that there are two or more definitions needing to be merged.
// This still will leave non-minimal form in the case of irreducible control
// flow, where phi nodes may be in cycles with themselves, but unnecessary.
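//
// Illustrative walk-through (an added note, not from the paper): for a
// diamond CFG A->B, A->C, B->D, C->D whose only def is in A, a query at D
// recurses into B and C, and both hand back A's def. tryRemoveTrivialPhi then
// sees identical operands and collapses the candidate phi at D, so no
// MemoryPhi is created. A phi at D survives only if B and C return different
// defs.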
MemoryAccess *MemorySSAUpdater::getPreviousDefRecursive(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  // First, do a cache lookup. Without this cache, certain CFG structures
  // (like a series of if statements) take exponential time to visit.
  auto Cached = CachedPreviousDef.find(BB);
  if (Cached != CachedPreviousDef.end()) {
    return Cached->second;
  }

  if (BasicBlock *Pred = BB->getSinglePredecessor()) {
    // Single predecessor case, just recurse, we can only have one definition.
    MemoryAccess *Result = getPreviousDefFromEnd(Pred, CachedPreviousDef);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.count(BB)) {
    // We hit our node again, meaning we had a cycle, we must insert a phi
    // node to break it so we have an operand. The only case this will
    // insert useless phis is if we have irreducible control flow.
    MemoryAccess *Result = MSSA->createMemoryPhi(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.insert(BB).second) {
    // Mark us visited so we can detect a cycle.
    SmallVector<TrackingVH<MemoryAccess>, 8> PhiOps;

    // Recurse to get the values in our predecessors for placement of a
    // potential phi node. This will insert phi nodes if we cycle in order to
    // break the cycle and have an operand.
    for (auto *Pred : predecessors(BB))
      PhiOps.push_back(getPreviousDefFromEnd(Pred, CachedPreviousDef));

    // Now try to simplify the ops to avoid placing a phi.
    // This may return null if we never created a phi yet; that's okay.
    MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MSSA->getMemoryAccess(BB));

    // See if we can avoid the phi by simplifying it.
    auto *Result = tryRemoveTrivialPhi(Phi, PhiOps);
    // If we couldn't simplify, we may have to create a phi.
    if (Result == Phi) {
      if (!Phi)
        Phi = MSSA->createMemoryPhi(BB);

      // See if the existing phi operands match what we need.
      // Unlike normal SSA, we only allow one phi node per block, so we can't
      // just create a new one.
      if (Phi->getNumOperands() != 0) {
        // FIXME: Figure out whether this is dead code and if so remove it.
        if (!std::equal(Phi->op_begin(), Phi->op_end(), PhiOps.begin())) {
          // These will have been filled in by the recursive read we did above.
          llvm::copy(PhiOps, Phi->op_begin());
          std::copy(pred_begin(BB), pred_end(BB), Phi->block_begin());
        }
      } else {
        unsigned i = 0;
        for (auto *Pred : predecessors(BB))
          Phi->addIncoming(&*PhiOps[i++], Pred);
        InsertedPHIs.push_back(Phi);
      }
      Result = Phi;
    }

    // Set ourselves up for the next variable by resetting visited state.
    VisitedBlocks.erase(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }
  llvm_unreachable("Should have hit one of the three cases above");
}

// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If a definition is not found in the block of the
// access, it continues globally, creating phi nodes to ensure we have a
// single definition.
MemoryAccess *MemorySSAUpdater::getPreviousDef(MemoryAccess *MA) {
  if (auto *LocalResult = getPreviousDefInBlock(MA))
    return LocalResult;
  DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
  return getPreviousDefRecursive(MA->getBlock(), CachedPreviousDef);
}

// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If the definition is not found in the block of the
// access, it returns nullptr.
MemoryAccess *MemorySSAUpdater::getPreviousDefInBlock(MemoryAccess *MA) {
  auto *Defs = MSSA->getWritableBlockDefs(MA->getBlock());

  // It's possible there are no defs, or we got handed the first def to start.
  if (Defs) {
    // If this is a def, we can just use the def iterators.
    if (!isa<MemoryUse>(MA)) {
      auto Iter = MA->getReverseDefsIterator();
      ++Iter;
      if (Iter != Defs->rend())
        return &*Iter;
    } else {
      // Otherwise, have to walk the all access iterator.
      auto End = MSSA->getWritableBlockAccesses(MA->getBlock())->rend();
      for (auto &U : make_range(++MA->getReverseIterator(), End))
        if (!isa<MemoryUse>(U))
          return cast<MemoryAccess>(&U);
      // Note that if MA comes before Defs->begin(), we won't hit a def.
      return nullptr;
    }
  }
  return nullptr;
}

// This starts at the end of the block.
MemoryAccess *MemorySSAUpdater::getPreviousDefFromEnd(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  auto *Defs = MSSA->getWritableBlockDefs(BB);

  if (Defs)
    return &*Defs->rbegin();

  return getPreviousDefRecursive(BB, CachedPreviousDef);
}
// Recurse over a set of phi uses to eliminate the trivial ones.
MemoryAccess *MemorySSAUpdater::recursePhi(MemoryAccess *Phi) {
  if (!Phi)
    return nullptr;
  TrackingVH<MemoryAccess> Res(Phi);
  SmallVector<TrackingVH<Value>, 8> Uses;
  std::copy(Phi->user_begin(), Phi->user_end(), std::back_inserter(Uses));
  for (auto &U : Uses) {
    if (MemoryPhi *UsePhi = dyn_cast<MemoryPhi>(&*U)) {
      auto OperRange = UsePhi->operands();
      tryRemoveTrivialPhi(UsePhi, OperRange);
    }
  }
  return Res;
}

// Eliminate trivial phis.
// Phis are trivial if they are defined either by themselves, or all the same
// argument.
// i.e., phi(a, a) or b = phi(a, b) or c = phi(a, a, c)
// We recursively try to remove them.
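// For instance (an added note): removing c = phi(a, c) rewrites a user
// b = phi(a, c) into b = phi(a, a), which the recursive walk over c's former
// users then collapses to a as well.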
template <class RangeType>
MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi,
                                                    RangeType &Operands) {
  // Bail out on non-opt Phis.
  if (NonOptPhis.count(Phi))
    return Phi;

  // Detect equal or self arguments.
  MemoryAccess *Same = nullptr;
  for (auto &Op : Operands) {
    // If the same or self, good so far.
    if (Op == Phi || Op == Same)
      continue;
    // Not the same; the phi isn't eliminatable by us, return it.
    if (Same)
      return Phi;
    Same = cast<MemoryAccess>(&*Op);
  }
  // Never found a non-self reference, the phi is undef.
  if (Same == nullptr)
    return MSSA->getLiveOnEntryDef();
  if (Phi) {
    Phi->replaceAllUsesWith(Same);
    removeMemoryAccess(Phi);
  }

  // We should only end up recursing in case we replaced something, in which
  // case, we may have made other Phis trivial.
  return recursePhi(Same);
}

void MemorySSAUpdater::insertUse(MemoryUse *MU) {
  InsertedPHIs.clear();
  MU->setDefiningAccess(getPreviousDef(MU));
  // Unlike for defs, there is no extra work to do. Because uses do not create
  // new may-defs, there are only two cases:
  //
  // 1. There was a def already below us, and therefore, we should not have
  // created a phi node because it was already needed for the def.
  //
  // 2. There is no def below us, and therefore, there is no extra renaming
  // work to do.
}

// Set every incoming edge {BB, MP->getBlock()} of MemoryPhi MP to NewDef.
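// Note (added for clarity): a block can be a predecessor along more than one
// edge, e.g. a switch with two cases branching to MP's block, so the loop
// below rewrites every consecutive incoming entry for BB, not just the first
// one found.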
static void setMemoryPhiValueForBlock(MemoryPhi *MP, const BasicBlock *BB,
                                      MemoryAccess *NewDef) {
  // Replace any operand whose incoming block is BB with the new defining
  // access.
  int i = MP->getBasicBlockIndex(BB);
  assert(i != -1 && "Should have found the basic block in the phi");
  // We can't just compare i against getNumOperands since one is signed and the
  // other not. So use it to index into the block iterator.
  for (auto BBIter = MP->block_begin() + i; BBIter != MP->block_end();
       ++BBIter) {
    if (*BBIter != BB)
      break;
    MP->setIncomingValue(i, NewDef);
    ++i;
  }
}

// A brief description of the algorithm:
// First, we compute what should define the new def, using the SSA
// construction algorithm.
// Then, we update the defs below us (and any new phi nodes) in the graph to
// point to the correct new defs, to ensure we only have one variable, and no
// disconnected stores.
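//
// Illustrative usage (a sketch with hypothetical names, not code from this
// file): a pass that has just created a new store SI at the end of block BB
// might register it with the updater roughly as
//   auto *NewDef = cast<MemoryDef>(Updater.createMemoryAccessInBB(
//       SI, MSSA->getLiveOnEntryDef(), BB, MemorySSA::End));
//   Updater.insertDef(NewDef, /*RenameUses=*/true);
// The placeholder defining access is acceptable because insertDef overwrites
// it below with the result of getPreviousDef.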
void MemorySSAUpdater::insertDef(MemoryDef *MD, bool RenameUses) {
  InsertedPHIs.clear();

  // See if we had a local def, and if not, go hunting.
  MemoryAccess *DefBefore = getPreviousDef(MD);
  bool DefBeforeSameBlock = DefBefore->getBlock() == MD->getBlock();

  // There is a def before us, which means we can replace any store/phi uses
  // of that thing with us, since we are in the way of whatever was there
  // before.
  // We now define that def's memorydefs and memoryphis.
  if (DefBeforeSameBlock) {
    for (auto UI = DefBefore->use_begin(), UE = DefBefore->use_end();
         UI != UE;) {
      Use &U = *UI++;
      // Leave the MemoryUses alone.
      // Also make sure we skip ourselves to avoid self references.
      if (isa<MemoryUse>(U.getUser()) || U.getUser() == MD)
        continue;
      U.set(MD);
    }
  }

  // and that def is now our defining access.
  MD->setDefiningAccess(DefBefore);

  SmallVector<WeakVH, 8> FixupList(InsertedPHIs.begin(), InsertedPHIs.end());
  if (!DefBeforeSameBlock) {
    // If there was a local def before us, we must have the same effect it
    // did. Because every may-def is the same, any phis/etc we would create, it
    // would also have created. If there was no local def before us, we
    // performed a global update, and have to search all successors and make
    // sure we update the first def in each of them (following all paths until
    // we hit the first def along each path). This may also insert phi nodes.
    // TODO: There are other cases we can skip this work, such as when we have
    // a single successor, and only used a straight line of single pred blocks
    // backwards to find the def. To make that work, we'd have to track whether
    // getDefRecursive only ever used the single predecessor case. These types
    // of paths also only exist in between CFG simplifications.
    FixupList.push_back(MD);
  }

  while (!FixupList.empty()) {
    unsigned StartingPHISize = InsertedPHIs.size();
    fixupDefs(FixupList);
    FixupList.clear();
    // Put any new phis on the fixup list, and process them.
    FixupList.append(InsertedPHIs.begin() + StartingPHISize,
                     InsertedPHIs.end());
  }
  // Now that all fixups are done, rename all uses if we are asked.
  if (RenameUses) {
    SmallPtrSet<BasicBlock *, 16> Visited;
    BasicBlock *StartBlock = MD->getBlock();
    // We are guaranteed there is a def in the block, because we just got it
    // handed to us in this function.
    MemoryAccess *FirstDef = &*MSSA->getWritableBlockDefs(StartBlock)->begin();
    // Convert to incoming value if it's a memorydef. A phi *is* already an
    // incoming value.
    if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
      FirstDef = MD->getDefiningAccess();

    MSSA->renamePass(MD->getBlock(), FirstDef, Visited);
    // We just inserted a phi into this block, so the incoming value will
    // become the phi anyway, so it does not matter what we pass.
    for (auto &MP : InsertedPHIs) {
      MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
      if (Phi)
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
    }
  }
}

void MemorySSAUpdater::fixupDefs(const SmallVectorImpl<WeakVH> &Vars) {
  SmallPtrSet<const BasicBlock *, 8> Seen;
  SmallVector<const BasicBlock *, 16> Worklist;
  for (auto &Var : Vars) {
    MemoryAccess *NewDef = dyn_cast_or_null<MemoryAccess>(Var);
    if (!NewDef)
      continue;
    // First, see if there is a local def after the operand.
    auto *Defs = MSSA->getWritableBlockDefs(NewDef->getBlock());
    auto DefIter = NewDef->getDefsIterator();

    // The temporary Phi is being fixed, so unmark it to allow optimization.
    if (MemoryPhi *Phi = dyn_cast<MemoryPhi>(NewDef))
      NonOptPhis.erase(Phi);

    // If there is a local def after us, we only have to rename that.
    if (++DefIter != Defs->end()) {
      cast<MemoryDef>(&*DefIter)->setDefiningAccess(NewDef);
      continue;
    }

    // Otherwise, we need to search down through the CFG.
    // For each of our successors, handle it directly if there is a phi, or
    // place it on the fixup worklist.
    for (const auto *S : successors(NewDef->getBlock())) {
      if (auto *MP = MSSA->getMemoryAccess(S))
        setMemoryPhiValueForBlock(MP, NewDef->getBlock(), NewDef);
      else
        Worklist.push_back(S);
    }

    while (!Worklist.empty()) {
      const BasicBlock *FixupBlock = Worklist.back();
      Worklist.pop_back();

      // Get the first def in the block that isn't a phi node.
      if (auto *Defs = MSSA->getWritableBlockDefs(FixupBlock)) {
        auto *FirstDef = &*Defs->begin();
        // The loop above and below should have taken care of phi nodes.
        assert(!isa<MemoryPhi>(FirstDef) &&
               "Should have already handled phi nodes!");
        // We are now this def's defining access, make sure we actually
        // dominate it.
        assert(MSSA->dominates(NewDef, FirstDef) &&
               "Should have dominated the new access");

        // This may insert new phi nodes, because we are not guaranteed the
        // block we are processing has a single pred, and depending where the
        // store was inserted, it may require phi nodes below it.
        cast<MemoryDef>(FirstDef)->setDefiningAccess(getPreviousDef(FirstDef));
        return;
      }
      // We didn't find a def, so we must continue.
      for (const auto *S : successors(FixupBlock)) {
        // If there is a phi node, handle it.
        // Otherwise, put the block on the worklist.
        if (auto *MP = MSSA->getMemoryAccess(S))
          setMemoryPhiValueForBlock(MP, FixupBlock, NewDef);
        else {
          // If we cycle, we should have ended up at a phi node that we already
          // processed. FIXME: Double check this
          if (!Seen.insert(S).second)
            continue;
          Worklist.push_back(S);
        }
      }
    }
  }
}

void MemorySSAUpdater::removeEdge(BasicBlock *From, BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    MPhi->unorderedDeleteIncomingBlock(From);
    if (MPhi->getNumIncomingValues() == 1)
      removeMemoryAccess(MPhi);
  }
}

void MemorySSAUpdater::removeDuplicatePhiEdgesBetween(BasicBlock *From,
                                                      BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    bool Found = false;
    MPhi->unorderedDeleteIncomingIf([&](const MemoryAccess *, BasicBlock *B) {
      if (From != B)
        return false;
      if (Found)
        return true;
      Found = true;
      return false;
    });
    if (MPhi->getNumIncomingValues() == 1)
      removeMemoryAccess(MPhi);
  }
}

void MemorySSAUpdater::cloneUsesAndDefs(BasicBlock *BB, BasicBlock *NewBB,
                                        const ValueToValueMapTy &VMap,
                                        PhiToDefMap &MPhiMap) {
  auto GetNewDefiningAccess = [&](MemoryAccess *MA) -> MemoryAccess * {
    MemoryAccess *InsnDefining = MA;
    if (MemoryUseOrDef *DefMUD = dyn_cast<MemoryUseOrDef>(InsnDefining)) {
      if (!MSSA->isLiveOnEntryDef(DefMUD)) {
        Instruction *DefMUDI = DefMUD->getMemoryInst();
        assert(DefMUDI && "Found MemoryUseOrDef with no Instruction.");
        if (Instruction *NewDefMUDI =
                cast_or_null<Instruction>(VMap.lookup(DefMUDI)))
          InsnDefining = MSSA->getMemoryAccess(NewDefMUDI);
      }
    } else {
      MemoryPhi *DefPhi = cast<MemoryPhi>(InsnDefining);
      if (MemoryAccess *NewDefPhi = MPhiMap.lookup(DefPhi))
        InsnDefining = NewDefPhi;
    }
    assert(InsnDefining && "Defining instruction cannot be nullptr.");
    return InsnDefining;
  };

  const MemorySSA::AccessList *Acc = MSSA->getBlockAccesses(BB);
  if (!Acc)
    return;
  for (const MemoryAccess &MA : *Acc) {
    if (const MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&MA)) {
      Instruction *Insn = MUD->getMemoryInst();
      // Entry does not exist if the clone of the block did not clone all
      // instructions. This occurs in LoopRotate when cloning instructions
      // from the old header to the old preheader. The cloned instruction may
      // also be a simplified Value, not an Instruction (see LoopRotate).
      if (Instruction *NewInsn =
              dyn_cast_or_null<Instruction>(VMap.lookup(Insn))) {
        MemoryAccess *NewUseOrDef = MSSA->createDefinedAccess(
            NewInsn, GetNewDefiningAccess(MUD->getDefiningAccess()), MUD);
        MSSA->insertIntoListsForBlock(NewUseOrDef, NewBB, MemorySSA::End);
      }
    }
  }
}

void MemorySSAUpdater::updateForClonedLoop(const LoopBlocksRPO &LoopBlocks,
                                           ArrayRef<BasicBlock *> ExitBlocks,
                                           const ValueToValueMapTy &VMap,
                                           bool IgnoreIncomingWithNoClones) {
  PhiToDefMap MPhiMap;

  auto FixPhiIncomingValues = [&](MemoryPhi *Phi, MemoryPhi *NewPhi) {
    assert(Phi && NewPhi && "Invalid Phi nodes.");
    BasicBlock *NewPhiBB = NewPhi->getBlock();
    SmallPtrSet<BasicBlock *, 4> NewPhiBBPreds(pred_begin(NewPhiBB),
                                               pred_end(NewPhiBB));
    for (unsigned It = 0, E = Phi->getNumIncomingValues(); It < E; ++It) {
      MemoryAccess *IncomingAccess = Phi->getIncomingValue(It);
      BasicBlock *IncBB = Phi->getIncomingBlock(It);

      if (BasicBlock *NewIncBB = cast_or_null<BasicBlock>(VMap.lookup(IncBB)))
        IncBB = NewIncBB;
      else if (IgnoreIncomingWithNoClones)
        continue;

      // Now we have IncBB, and will need to add incoming from it to NewPhi.

      // If IncBB is not a predecessor of NewPhiBB, then do not add it.
      // NewPhiBB was cloned without that edge.
      if (!NewPhiBBPreds.count(IncBB))
        continue;

      // Determine incoming value and add it as incoming from IncBB.
      if (MemoryUseOrDef *IncMUD = dyn_cast<MemoryUseOrDef>(IncomingAccess)) {
        if (!MSSA->isLiveOnEntryDef(IncMUD)) {
          Instruction *IncI = IncMUD->getMemoryInst();
          assert(IncI && "Found MemoryUseOrDef with no Instruction.");
          if (Instruction *NewIncI =
                  cast_or_null<Instruction>(VMap.lookup(IncI))) {
            IncMUD = MSSA->getMemoryAccess(NewIncI);
            assert(IncMUD &&
                   "MemoryUseOrDef cannot be null, all preds processed.");
          }
        }
        NewPhi->addIncoming(IncMUD, IncBB);
      } else {
        MemoryPhi *IncPhi = cast<MemoryPhi>(IncomingAccess);
        if (MemoryAccess *NewDefPhi = MPhiMap.lookup(IncPhi))
          NewPhi->addIncoming(NewDefPhi, IncBB);
        else
          NewPhi->addIncoming(IncPhi, IncBB);
      }
    }
  };

  auto ProcessBlock = [&](BasicBlock *BB) {
    BasicBlock *NewBlock = cast_or_null<BasicBlock>(VMap.lookup(BB));
    if (!NewBlock)
      return;

    assert(!MSSA->getWritableBlockAccesses(NewBlock) &&
           "Cloned block should have no accesses");

    // Add MemoryPhi.
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB)) {
      MemoryPhi *NewPhi = MSSA->createMemoryPhi(NewBlock);
      MPhiMap[MPhi] = NewPhi;
    }
    // Update Uses and Defs.
    cloneUsesAndDefs(BB, NewBlock, VMap, MPhiMap);
  };

  for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    ProcessBlock(BB);

  for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
      if (MemoryAccess *NewPhi = MPhiMap.lookup(MPhi))
        FixPhiIncomingValues(MPhi, cast<MemoryPhi>(NewPhi));
}

void MemorySSAUpdater::updateForClonedBlockIntoPred(
    BasicBlock *BB, BasicBlock *P1, const ValueToValueMapTy &VM) {
  // All defs/phis from outside BB that are used in BB are valid uses in P1,
  // since those defs/phis must have dominated BB and therefore also dominate
  // P1. Defs from BB being used in BB will be replaced with the cloned defs
  // from VM. The uses of BB's Phi (if it exists) in BB will be replaced by the
  // incoming def into the Phi from P1.
  PhiToDefMap MPhiMap;
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
    MPhiMap[MPhi] = MPhi->getIncomingValueForBlock(P1);
  cloneUsesAndDefs(BB, P1, VM, MPhiMap);
}

template <typename Iter>
void MemorySSAUpdater::privateUpdateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, Iter ValuesBegin, Iter ValuesEnd,
    DominatorTree &DT) {
  SmallVector<CFGUpdate, 4> Updates;
  // Update/insert phis in all successors of exit blocks.
  for (auto *Exit : ExitBlocks)
    for (const ValueToValueMapTy *VMap : make_range(ValuesBegin, ValuesEnd))
      if (BasicBlock *NewExit = cast_or_null<BasicBlock>(VMap->lookup(Exit))) {
        BasicBlock *ExitSucc = NewExit->getTerminator()->getSuccessor(0);
        Updates.push_back({DT.Insert, NewExit, ExitSucc});
      }
  applyInsertUpdates(Updates, DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, const ValueToValueMapTy &VMap,
    DominatorTree &DT) {
  const ValueToValueMapTy *const Arr[] = {&VMap};
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, std::begin(Arr),
                                       std::end(Arr), DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks,
    ArrayRef<std::unique_ptr<ValueToValueMapTy>> VMaps, DominatorTree &DT) {
  auto GetPtr = [&](const std::unique_ptr<ValueToValueMapTy> &I) {
    return I.get();
  };
  using MappedIteratorType =
      mapped_iterator<const std::unique_ptr<ValueToValueMapTy> *,
                      decltype(GetPtr)>;
  auto MapBegin = MappedIteratorType(VMaps.begin(), GetPtr);
  auto MapEnd = MappedIteratorType(VMaps.end(), GetPtr);
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, MapBegin, MapEnd, DT);
}

void MemorySSAUpdater::applyUpdates(ArrayRef<CFGUpdate> Updates,
                                    DominatorTree &DT) {
  SmallVector<CFGUpdate, 4> RevDeleteUpdates;
  SmallVector<CFGUpdate, 4> InsertUpdates;
  for (auto &Update : Updates) {
    if (Update.getKind() == DT.Insert)
      InsertUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
    else
      RevDeleteUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
  }

  if (!RevDeleteUpdates.empty()) {
    // Update for inserted edges: use NewDT and snapshot CFG as if deletes had
    // not occurred.
    // FIXME: This creates a new DT, so it's more expensive to mix
    // deletes/inserts vs. just inserts. We can do an incremental update on the
    // DT to revert deletes, then re-delete the edges. Teaching DT to do this
    // is part of a pending cleanup.
    DominatorTree NewDT(DT, RevDeleteUpdates);
    GraphDiff<BasicBlock *> GD(RevDeleteUpdates);
    applyInsertUpdates(InsertUpdates, NewDT, &GD);
  } else {
    GraphDiff<BasicBlock *> GD;
    applyInsertUpdates(InsertUpdates, DT, &GD);
  }

  // Update for deleted edges.
  for (auto &Update : RevDeleteUpdates)
    removeEdge(Update.getFrom(), Update.getTo());
}

void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT) {
  GraphDiff<BasicBlock *> GD;
  applyInsertUpdates(Updates, DT, &GD);
}

void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT,
                                          const GraphDiff<BasicBlock *> *GD) {
  // Get recursive last Def, assuming well formed MSSA and updated DT.
  auto GetLastDef = [&](BasicBlock *BB) -> MemoryAccess * {
    while (true) {
      MemorySSA::DefsList *Defs = MSSA->getWritableBlockDefs(BB);
      // Return last Def or Phi in BB, if it exists.
      if (Defs)
        return &*(--Defs->end());

      // Check number of predecessors, we only care if there's more than one.
      unsigned Count = 0;
      BasicBlock *Pred = nullptr;
      for (auto &Pair : children<GraphDiffInvBBPair>({GD, BB})) {
        Pred = Pair.second;
        Count++;
        if (Count == 2)
          break;
      }

      // If BB has multiple predecessors, get last definition from IDom.
      if (Count != 1) {
        // [SimpleLoopUnswitch] If BB is a dead block, about to be deleted, its
        // DT is invalidated. Return LoE as its last def. This will be added to
        // MemoryPhi node, and later deleted when the block is deleted.
        if (!DT.getNode(BB))
          return MSSA->getLiveOnEntryDef();
        if (auto *IDom = DT.getNode(BB)->getIDom())
          if (IDom->getBlock() != BB) {
            BB = IDom->getBlock();
            continue;
          }
        return MSSA->getLiveOnEntryDef();
      } else {
        // Single predecessor, BB cannot be dead. GetLastDef of Pred.
        assert(Count == 1 && Pred && "Single predecessor expected.");
        BB = Pred;
      }
    };
    llvm_unreachable("Unable to get last definition.");
  };

  // Get nearest IDom given a set of blocks.
  // TODO: this can be optimized by starting the search at the node with the
  // lowest level (highest in the tree).
  auto FindNearestCommonDominator =
      [&](const SmallSetVector<BasicBlock *, 2> &BBSet) -> BasicBlock * {
    BasicBlock *PrevIDom = *BBSet.begin();
    for (auto *BB : BBSet)
      PrevIDom = DT.findNearestCommonDominator(PrevIDom, BB);
    return PrevIDom;
  };

  // Get all blocks that dominate PrevIDom, stop when reaching CurrIDom. Do not
  // include CurrIDom.
  auto GetNoLongerDomBlocks =
      [&](BasicBlock *PrevIDom, BasicBlock *CurrIDom,
          SmallVectorImpl<BasicBlock *> &BlocksPrevDom) {
        if (PrevIDom == CurrIDom)
          return;
        BlocksPrevDom.push_back(PrevIDom);
        BasicBlock *NextIDom = PrevIDom;
        while (BasicBlock *UpIDom =
                   DT.getNode(NextIDom)->getIDom()->getBlock()) {
          if (UpIDom == CurrIDom)
            break;
          BlocksPrevDom.push_back(UpIDom);
          NextIDom = UpIDom;
        }
      };

  // Map a BB to its predecessors: added + previously existing. To get a
  // deterministic order, store predecessors as SetVectors. The order in each
  // will be defined by the order in Updates (fixed) and the order given by
  // children<> (also fixed). Since we further iterate over these ordered sets,
  // we lose the information of multiple edges possibly existing between two
  // blocks, so we'll keep an EdgeCount map for that.
  // An alternate implementation could keep unordered sets for the
  // predecessors, traverse either Updates or children<> each time to get the
  // deterministic order, and drop the usage of EdgeCount. This alternate
  // approach would still require querying the maps for each predecessor, and
  // the children<> call has additional computation inside for creating the
  // snapshot-graph predecessors. As such, we favor using a little additional
  // storage and less compute time. This decision can be revisited if we find
  // the alternative more favorable.
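  // For example (an added illustration): a switch that jumps to the same
  // successor on two case values contributes two CFG edges between the same
  // pair of blocks; EdgeCountMap records that multiplicity so the phi update
  // below adds one incoming entry per edge rather than per unique predecessor.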

  struct PredInfo {
    SmallSetVector<BasicBlock *, 2> Added;
    SmallSetVector<BasicBlock *, 2> Prev;
  };
  SmallDenseMap<BasicBlock *, PredInfo> PredMap;

  for (auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    auto &AddedBlockSet = PredMap[BB].Added;
    AddedBlockSet.insert(Edge.getFrom());
  }

  // Store all existing predecessors for each BB, at least one must exist.
  SmallDenseMap<std::pair<BasicBlock *, BasicBlock *>, int> EdgeCountMap;
  SmallPtrSet<BasicBlock *, 2> NewBlocks;
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    auto &PrevBlockSet = BBPredPair.second.Prev;
    for (auto &Pair : children<GraphDiffInvBBPair>({GD, BB})) {
      BasicBlock *Pi = Pair.second;
      if (!AddedBlockSet.count(Pi))
        PrevBlockSet.insert(Pi);
      EdgeCountMap[{Pi, BB}]++;
    }

    if (PrevBlockSet.empty()) {
      assert(pred_size(BB) == AddedBlockSet.size() && "Duplicate edges added.");
      LLVM_DEBUG(
          dbgs()
          << "Adding a predecessor to a block with no predecessors. "
             "This must be an edge added to a new, likely cloned, block. "
             "Its memory accesses must be already correct, assuming completed "
             "via the updateExitBlocksForClonedLoop API. "
             "Assert a single such edge is added so no phi addition or "
             "additional processing is required.\n");
      assert(AddedBlockSet.size() == 1 &&
             "Can only handle adding one predecessor to a new block.");
      // Need to remove new blocks from PredMap. Remove below to not invalidate
      // iterator here.
      NewBlocks.insert(BB);
    }
  }
  // Nothing to process for new/cloned blocks.
  for (auto *BB : NewBlocks)
    PredMap.erase(BB);

  SmallVector<BasicBlock *, 8> BlocksToProcess;
  SmallVector<BasicBlock *, 16> BlocksWithDefsToReplace;

  // First create MemoryPhis in all blocks that don't have one. Create in the
  // order found in Updates, not in PredMap, to get deterministic numbering.
  for (auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    if (PredMap.count(BB) && !MSSA->getMemoryAccess(BB))
      MSSA->createMemoryPhi(BB);
  }

  // Now we'll fill in the MemoryPhis with the right incoming values.
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &PrevBlockSet = BBPredPair.second.Prev;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    assert(!PrevBlockSet.empty() &&
           "At least one previous predecessor must exist.");

    // TODO: if this becomes a bottleneck, we can save on GetLastDef calls by
    // keeping this map before the loop. We can reuse already populated entries
    // if an edge is added from the same predecessor to two different blocks,
    // and this does happen in rotate. Note that the map needs to be updated
    // when deleting non-necessary phis below, if the phi is in the map, by
    // replacing the value with DefP1.
    SmallDenseMap<BasicBlock *, MemoryAccess *> LastDefAddedPred;
    for (auto *AddedPred : AddedBlockSet) {
      auto *DefPn = GetLastDef(AddedPred);
      assert(DefPn != nullptr && "Unable to find last definition.");
      LastDefAddedPred[AddedPred] = DefPn;
    }

    MemoryPhi *NewPhi = MSSA->getMemoryAccess(BB);
    // If Phi is not empty, add an incoming edge from each added pred. Must
    // still compute blocks with defs to replace for this block below.
    if (NewPhi->getNumOperands()) {
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
    } else {
      // Pick any existing predecessor and get its definition. All other
      // existing predecessors should have the same one, since no phi existed.
      auto *P1 = *PrevBlockSet.begin();
      MemoryAccess *DefP1 = GetLastDef(P1);

      // Check DefP1 against all Defs in LastDefPredPair. If all the same,
      // nothing to add.
      bool InsertPhi = false;
      for (auto LastDefPredPair : LastDefAddedPred)
        if (DefP1 != LastDefPredPair.second) {
          InsertPhi = true;
          break;
        }
      if (!InsertPhi) {
        // Since NewPhi may be used in other newly added Phis, replace all uses
        // of NewPhi with the definition coming from all predecessors (DefP1),
        // before deleting it.
        NewPhi->replaceAllUsesWith(DefP1);
        removeMemoryAccess(NewPhi);
        continue;
      }

      // Update Phi with new values for new predecessors and old value for all
      // other predecessors. Since AddedBlockSet and PrevBlockSet are ordered
      // sets, the order of entries in NewPhi is deterministic.
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
      for (auto *Pred : PrevBlockSet)
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(DefP1, Pred);

      // Insert BB in the set of blocks that now have a definition. We'll use
      // this to compute IDF and add Phis there next.
      BlocksToProcess.push_back(BB);
    }

    // Get all blocks that used to dominate BB and no longer do after adding
    // AddedBlockSet, where PrevBlockSet are the previously known predecessors.
    assert(DT.getNode(BB)->getIDom() && "BB does not have valid idom");
    BasicBlock *PrevIDom = FindNearestCommonDominator(PrevBlockSet);
    assert(PrevIDom && "Previous IDom should exist");
    BasicBlock *NewIDom = DT.getNode(BB)->getIDom()->getBlock();
    assert(NewIDom && "BB should have a new valid idom");
    assert(DT.dominates(NewIDom, PrevIDom) &&
           "New idom should dominate old idom");
    GetNoLongerDomBlocks(PrevIDom, NewIDom, BlocksWithDefsToReplace);
  }

  // Compute IDF and add Phis in all IDF blocks that do not have one.
  SmallVector<BasicBlock *, 32> IDFBlocks;
  if (!BlocksToProcess.empty()) {
    ForwardIDFCalculator IDFs(DT);
    SmallPtrSet<BasicBlock *, 16> DefiningBlocks(BlocksToProcess.begin(),
                                                 BlocksToProcess.end());
    IDFs.setDefiningBlocks(DefiningBlocks);
    IDFs.calculate(IDFBlocks);
    for (auto *BBIDF : IDFBlocks) {
      if (auto *IDFPhi = MSSA->getMemoryAccess(BBIDF)) {
        // Update existing Phi.
        // FIXME: some updates may be redundant, try to optimize and skip some.
        for (unsigned I = 0, E = IDFPhi->getNumIncomingValues(); I < E; ++I)
          IDFPhi->setIncomingValue(I, GetLastDef(IDFPhi->getIncomingBlock(I)));
      } else {
        IDFPhi = MSSA->createMemoryPhi(BBIDF);
        for (auto &Pair : children<GraphDiffInvBBPair>({GD, BBIDF})) {
          BasicBlock *Pi = Pair.second;
          IDFPhi->addIncoming(GetLastDef(Pi), Pi);
        }
      }
    }
  }

  // Now for all defs in BlocksWithDefsToReplace, if there are uses they no
  // longer dominate, replace those with the closest dominating def.
  // This will also update optimized accesses, as they're also uses.
  for (auto *BlockWithDefsToReplace : BlocksWithDefsToReplace) {
    if (auto DefsList = MSSA->getWritableBlockDefs(BlockWithDefsToReplace)) {
      for (auto &DefToReplaceUses : *DefsList) {
        BasicBlock *DominatingBlock = DefToReplaceUses.getBlock();
        Value::use_iterator UI = DefToReplaceUses.use_begin(),
                            E = DefToReplaceUses.use_end();
        for (; UI != E;) {
          Use &U = *UI;
          ++UI;
          MemoryAccess *Usr = dyn_cast<MemoryAccess>(U.getUser());
          if (MemoryPhi *UsrPhi = dyn_cast<MemoryPhi>(Usr)) {
            BasicBlock *DominatedBlock = UsrPhi->getIncomingBlock(U);
            if (!DT.dominates(DominatingBlock, DominatedBlock))
              U.set(GetLastDef(DominatedBlock));
          } else {
            BasicBlock *DominatedBlock = Usr->getBlock();
            if (!DT.dominates(DominatingBlock, DominatedBlock)) {
              if (auto *DomBlPhi = MSSA->getMemoryAccess(DominatedBlock))
                U.set(DomBlPhi);
              else {
                auto *IDom = DT.getNode(DominatedBlock)->getIDom();
                assert(IDom && "Block must have a valid IDom.");
                U.set(GetLastDef(IDom->getBlock()));
              }
              cast<MemoryUseOrDef>(Usr)->resetOptimized();
            }
          }
        }
      }
    }
  }
}

// Move What before Where in the MemorySSA IR.
template <class WhereType>
void MemorySSAUpdater::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                              WhereType Where) {
  // Mark MemoryPhi users of What not to be optimized.
  for (auto *U : What->users())
    if (MemoryPhi *PhiUser = dyn_cast<MemoryPhi>(U))
      NonOptPhis.insert(PhiUser);

  // Replace all our users with our defining access.
  What->replaceAllUsesWith(What->getDefiningAccess());

  // Let MemorySSA take care of moving it around in the lists.
  MSSA->moveTo(What, BB, Where);

  // Now reinsert it into the IR and do whatever fixups are needed.
  if (auto *MD = dyn_cast<MemoryDef>(What))
    insertDef(MD);
  else
    insertUse(cast<MemoryUse>(What));

  // Clear dangling pointers. We added all MemoryPhi users, but not all
  // of them are removed by fixupDefs().
  NonOptPhis.clear();
}

// Move What before Where in the MemorySSA IR.
void MemorySSAUpdater::moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), Where->getIterator());
}

// Move What after Where in the MemorySSA IR.
void MemorySSAUpdater::moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), ++Where->getIterator());
}

void MemorySSAUpdater::moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
                                   MemorySSA::InsertionPlace Where) {
  return moveTo(What, BB, Where);
}

// All accesses in To used to be in From. Move to end and update access lists.
void MemorySSAUpdater::moveAllAccesses(BasicBlock *From, BasicBlock *To,
                                       Instruction *Start) {

  MemorySSA::AccessList *Accs = MSSA->getWritableBlockAccesses(From);
  if (!Accs)
    return;

  MemoryAccess *FirstInNew = nullptr;
  for (Instruction &I : make_range(Start->getIterator(), To->end()))
    if ((FirstInNew = MSSA->getMemoryAccess(&I)))
      break;
  if (!FirstInNew)
    return;

  auto *MUD = cast<MemoryUseOrDef>(FirstInNew);
  do {
    auto NextIt = ++MUD->getIterator();
    MemoryUseOrDef *NextMUD = (!Accs || NextIt == Accs->end())
                                  ? nullptr
                                  : cast<MemoryUseOrDef>(&*NextIt);
    MSSA->moveTo(MUD, To, MemorySSA::End);
    // Moving MUD from Accs in the moveTo above may delete Accs, so we need to
    // retrieve it again.
    Accs = MSSA->getWritableBlockAccesses(From);
    MUD = NextMUD;
  } while (MUD);
}

void MemorySSAUpdater::moveAllAfterSpliceBlocks(BasicBlock *From,
                                                BasicBlock *To,
                                                Instruction *Start) {
  assert(MSSA->getBlockAccesses(To) == nullptr &&
         "To block is expected to be free of MemoryAccesses.");
  moveAllAccesses(From, To, Start);
  for (BasicBlock *Succ : successors(To))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
      MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
}

void MemorySSAUpdater::moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To,
                                               Instruction *Start) {
  assert(From->getSinglePredecessor() == To &&
         "From block is expected to have a single predecessor (To).");
  moveAllAccesses(From, To, Start);
  for (BasicBlock *Succ : successors(From))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
      MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
}

/// If all arguments of a MemoryPHI are defined by the same incoming
/// argument, return that argument.
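/// For example, onlySingleValue(phi(a, a)) returns a, while
/// onlySingleValue(phi(a, b)) returns nullptr.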
static MemoryAccess *onlySingleValue(MemoryPhi *MP) {
  MemoryAccess *MA = nullptr;

  for (auto &Arg : MP->operands()) {
    if (!MA)
      MA = cast<MemoryAccess>(Arg);
    else if (MA != Arg)
      return nullptr;
  }
  return MA;
}

void MemorySSAUpdater::wireOldPredecessorsToNewImmediatePredecessor(
    BasicBlock *Old, BasicBlock *New, ArrayRef<BasicBlock *> Preds,
    bool IdenticalEdgesWereMerged) {
  assert(!MSSA->getWritableBlockAccesses(New) &&
         "Access list should be null for a new block.");
  MemoryPhi *Phi = MSSA->getMemoryAccess(Old);
  if (!Phi)
    return;
  if (Old->hasNPredecessors(1)) {
    assert(pred_size(New) == Preds.size() &&
           "Should have moved all predecessors.");
    MSSA->moveTo(Phi, New, MemorySSA::Beginning);
  } else {
    assert(!Preds.empty() && "Must be moving at least one predecessor to the "
                             "new immediate predecessor.");
    MemoryPhi *NewPhi = MSSA->createMemoryPhi(New);
    SmallPtrSet<BasicBlock *, 16> PredsSet(Preds.begin(), Preds.end());
    // Currently only support the case of removing a single incoming edge when
    // identical edges were not merged.
    if (!IdenticalEdgesWereMerged)
      assert(PredsSet.size() == Preds.size() &&
             "If identical edges were not merged, we cannot have duplicate "
             "blocks in the predecessors");
    Phi->unorderedDeleteIncomingIf([&](MemoryAccess *MA, BasicBlock *B) {
      if (PredsSet.count(B)) {
        NewPhi->addIncoming(MA, B);
        if (!IdenticalEdgesWereMerged)
          PredsSet.erase(B);
        return true;
      }
      return false;
    });
    Phi->addIncoming(NewPhi, New);
    if (onlySingleValue(NewPhi))
      removeMemoryAccess(NewPhi);
  }
}

void MemorySSAUpdater::removeMemoryAccess(MemoryAccess *MA) {
  assert(!MSSA->isLiveOnEntryDef(MA) &&
         "Trying to remove the live on entry def");
  // We can only delete phi nodes if they have no uses, or we can replace all
  // uses with a single definition.
  MemoryAccess *NewDefTarget = nullptr;
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) {
    // Note that it is sufficient to know that all edges of the phi node have
    // the same argument. If they do, by the definition of dominance frontiers
    // (which we used to place this phi), that argument must dominate this phi,
    // and thus, must dominate the phi's uses, and so we will not hit the
    // assert below.
    NewDefTarget = onlySingleValue(MP);
    assert((NewDefTarget || MP->use_empty()) &&
           "We can't delete this memory phi");
  } else {
    NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
  }

  // Re-point the uses at our defining access.
  if (!isa<MemoryUse>(MA) && !MA->use_empty()) {
    // Reset optimized on users of this store, and reset the uses.
    // A few notes:
    // 1. This is a slightly modified version of RAUW to avoid walking the
    // uses twice here.
    // 2. If we wanted to be complete, we would have to reset the optimized
    // flags on users of phi nodes if doing the below makes a phi node have all
    // the same arguments. Instead, we prefer users to removeMemoryAccess those
    // phi nodes, because doing it here would be N^3.
    if (MA->hasValueHandle())
      ValueHandleBase::ValueIsRAUWd(MA, NewDefTarget);
    // Note: We assume MemorySSA is not used in metadata since it's not really
    // part of the IR.

    while (!MA->use_empty()) {
      Use &U = *MA->use_begin();
      if (auto *MUD = dyn_cast<MemoryUseOrDef>(U.getUser()))
        MUD->resetOptimized();
      U.set(NewDefTarget);
    }
  }

  // The call below to erase will destroy MA, so we can't change the order we
  // are doing things here.
  MSSA->removeFromLookups(MA);
  MSSA->removeFromLists(MA);
}

void MemorySSAUpdater::removeBlocks(
    const SmallPtrSetImpl<BasicBlock *> &DeadBlocks) {
  // First delete all uses of BB in MemoryPhis.
  for (BasicBlock *BB : DeadBlocks) {
    Instruction *TI = BB->getTerminator();
    assert(TI && "Basic block expected to have a terminator instruction");
    for (BasicBlock *Succ : successors(TI))
      if (!DeadBlocks.count(Succ))
        if (MemoryPhi *MP = MSSA->getMemoryAccess(Succ)) {
          MP->unorderedDeleteIncomingBlock(BB);
          if (MP->getNumIncomingValues() == 1)
            removeMemoryAccess(MP);
        }
    // Drop all references of all accesses in BB.
    if (MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB))
      for (MemoryAccess &MA : *Acc)
        MA.dropAllReferences();
  }

  // Next, delete all memory accesses in each block.
  for (BasicBlock *BB : DeadBlocks) {
    MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB);
    if (!Acc)
      continue;
    for (auto AB = Acc->begin(), AE = Acc->end(); AB != AE;) {
      MemoryAccess *MA = &*AB;
      ++AB;
      MSSA->removeFromLookups(MA);
      MSSA->removeFromLists(MA);
    }
  }
}

MemoryAccess *MemorySSAUpdater::createMemoryAccessInBB(
    Instruction *I, MemoryAccess *Definition, const BasicBlock *BB,
    MemorySSA::InsertionPlace Point) {
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsForBlock(NewAccess, BB, Point);
  return NewAccess;
}

MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessBefore(
    Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              InsertPt->getIterator());
  return NewAccess;
}

MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessAfter(
    Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              ++InsertPt->getIterator());
  return NewAccess;
}