LLVM  8.0.1
MemorySSA.h
Go to the documentation of this file.
1 //===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 /// \file
11 /// This file exposes an interface to building/using memory SSA to
12 /// walk memory instructions using a use/def graph.
13 ///
14 /// Memory SSA class builds an SSA form that links together memory access
15 /// instructions such as loads, stores, atomics, and calls. Additionally, it
16 /// does a trivial form of "heap versioning": every time the memory state changes
17 /// in the program, we generate a new heap version. It generates
18 /// MemoryDef/Uses/Phis that are overlayed on top of the existing instructions.
19 ///
20 /// As a trivial example,
21 /// define i32 @main() #0 {
22 /// entry:
23 /// %call = call noalias i8* @_Znwm(i64 4) #2
24 /// %0 = bitcast i8* %call to i32*
25 /// %call1 = call noalias i8* @_Znwm(i64 4) #2
26 /// %1 = bitcast i8* %call1 to i32*
27 /// store i32 5, i32* %0, align 4
28 /// store i32 7, i32* %1, align 4
29 /// %2 = load i32* %0, align 4
30 /// %3 = load i32* %1, align 4
31 /// %add = add nsw i32 %2, %3
32 /// ret i32 %add
33 /// }
34 ///
35 /// Will become
36 /// define i32 @main() #0 {
37 /// entry:
38 /// ; 1 = MemoryDef(0)
39 /// %call = call noalias i8* @_Znwm(i64 4) #3
40 /// %2 = bitcast i8* %call to i32*
41 /// ; 2 = MemoryDef(1)
42 /// %call1 = call noalias i8* @_Znwm(i64 4) #3
43 /// %4 = bitcast i8* %call1 to i32*
44 /// ; 3 = MemoryDef(2)
45 /// store i32 5, i32* %2, align 4
46 /// ; 4 = MemoryDef(3)
47 /// store i32 7, i32* %4, align 4
48 /// ; MemoryUse(3)
49 /// %7 = load i32* %2, align 4
50 /// ; MemoryUse(4)
51 /// %8 = load i32* %4, align 4
52 /// %add = add nsw i32 %7, %8
53 /// ret i32 %add
54 /// }
55 ///
56 /// Given this form, all the stores that could ever affect the load at %8 can be
57 /// gotten by using the MemoryUse associated with it, and walking from use to
58 /// def until you hit the top of the function.
59 ///
60 /// Each def also has a list of users associated with it, so you can walk from
61 /// both def to users, and users to defs. Note that we disambiguate MemoryUses,
62 /// but not the RHS of MemoryDefs. You can see this above at %7, which would
63 /// otherwise be a MemoryUse(4). Being disambiguated means that for a given
64 /// store, all the MemoryUses on its use lists are may-aliases of that store
65 /// (but the MemoryDefs on its use list may not be).
66 ///
67 /// MemoryDefs are not disambiguated because it would require multiple reaching
68 /// definitions, which would require multiple phis, and multiple memoryaccesses
69 /// per instruction.
70 //
71 //===----------------------------------------------------------------------===//
72 
73 #ifndef LLVM_ANALYSIS_MEMORYSSA_H
74 #define LLVM_ANALYSIS_MEMORYSSA_H
75 
76 #include "llvm/ADT/DenseMap.h"
77 #include "llvm/ADT/GraphTraits.h"
78 #include "llvm/ADT/SmallPtrSet.h"
79 #include "llvm/ADT/SmallVector.h"
80 #include "llvm/ADT/ilist.h"
81 #include "llvm/ADT/ilist_node.h"
82 #include "llvm/ADT/iterator.h"
84 #include "llvm/ADT/simple_ilist.h"
88 #include "llvm/IR/BasicBlock.h"
89 #include "llvm/IR/DerivedUser.h"
90 #include "llvm/IR/Dominators.h"
91 #include "llvm/IR/Module.h"
92 #include "llvm/IR/Type.h"
93 #include "llvm/IR/Use.h"
94 #include "llvm/IR/User.h"
95 #include "llvm/IR/Value.h"
96 #include "llvm/IR/ValueHandle.h"
97 #include "llvm/Pass.h"
98 #include "llvm/Support/Casting.h"
99 #include <algorithm>
100 #include <cassert>
101 #include <cstddef>
102 #include <iterator>
103 #include <memory>
104 #include <utility>
105 
106 namespace llvm {
107 
108 class Function;
109 class Instruction;
110 class MemoryAccess;
111 class MemorySSAWalker;
112 class LLVMContext;
113 class raw_ostream;
114 
115 namespace MSSAHelpers {
116 
117 struct AllAccessTag {};
118 struct DefsOnlyTag {};
119 
120 } // end namespace MSSAHelpers
121 
// NOTE(review): this extraction dropped doc line 125 — the enumerator itself.
// Later code initializes OptimizedID from INVALID_MEMORYACCESS_ID, so that is
// presumably the missing name — confirm against the upstream header.
122 enum : unsigned {
123  // Used to signify what the default invalid ID is for MemoryAccess's
124  // getID()
126 };
127 
128 template <class T> class memoryaccess_def_iterator_base;
132 
133 // The base for all memory accesses. All memory accesses in a block are
134 // linked together using an intrusive list.
// NOTE(review): the extraction dropped the class declaration line (doc line
// 135, presumably "class MemoryAccess") as well as the right-hand sides of the
// AllAccessType/DefsOnlyType aliases (doc lines 141/143), the user-iterator
// typedefs (163-164), and the iterator accessor signatures (176/179/182/185/
// 188/191/194/197). Confirm against the upstream header before relying on
// exact declarations.
136  : public DerivedUser,
// Two intrusive-list hooks: one list of ALL accesses in a block, one of only
// the defs (MemoryDef/MemoryPhi) — distinguished by tag type.
137  public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
138  public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
139 public:
140  using AllAccessType =
142  using DefsOnlyType =
144 
// Non-copyable; also not directly heap-allocatable (subclasses provide their
// own operator new with the right operand counts).
145  MemoryAccess(const MemoryAccess &) = delete;
146  MemoryAccess &operator=(const MemoryAccess &) = delete;
147 
148  void *operator new(size_t) = delete;
149 
150  // Methods for support type inquiry through isa, cast, and
151  // dyn_cast
152  static bool classof(const Value *V) {
153  unsigned ID = V->getValueID();
154  return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
155  }
156 
157  BasicBlock *getBlock() const { return Block; }
158 
159  void print(raw_ostream &OS) const;
160  void dump() const;
161 
162  /// The user iterators for a memory access
165 
166  /// This iterator walks over all of the defs in a given
167  /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
168  /// MemoryUse/MemoryDef, this walks the defining access.
169  memoryaccess_def_iterator defs_begin();
170  const_memoryaccess_def_iterator defs_begin() const;
171  memoryaccess_def_iterator defs_end();
172  const_memoryaccess_def_iterator defs_end() const;
173 
174  /// Get the iterators for the all access list and the defs only list
175  /// We default to the all access list.
177  return this->AllAccessType::getIterator();
178  }
180  return this->AllAccessType::getIterator();
181  }
183  return this->AllAccessType::getReverseIterator();
184  }
186  return this->AllAccessType::getReverseIterator();
187  }
189  return this->DefsOnlyType::getIterator();
190  }
192  return this->DefsOnlyType::getIterator();
193  }
195  return this->DefsOnlyType::getReverseIterator();
196  }
198  return this->DefsOnlyType::getReverseIterator();
199  }
200 
201 protected:
202  friend class MemoryDef;
203  friend class MemoryPhi;
204  friend class MemorySSA;
205  friend class MemoryUse;
206  friend class MemoryUseOrDef;
207 
208  /// Used by MemorySSA to change the block of a MemoryAccess when it is
209  /// moved.
210  void setBlock(BasicBlock *BB) { Block = BB; }
211 
212  /// Used for debugging and tracking things about MemoryAccesses.
213  /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
214  inline unsigned getID() const;
215 
// All MemoryAccesses are void-typed Values; deletion goes through the
// DeleteValue callback supplied by the concrete subclass.
216  MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue,
217  BasicBlock *BB, unsigned NumOperands)
218  : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue),
219  Block(BB) {}
220 
221  // Use deleteValue() to delete a generic MemoryAccess.
222  ~MemoryAccess() = default;
223 
224 private:
225  BasicBlock *Block;
226 };
227 
// Intrusive-list deletion for MemoryAccess must route through
// Value::deleteValue() rather than plain delete.
// NOTE(review): the "struct ilist_alloc_traits<MemoryAccess> {" line (doc
// line 229) is missing from this extraction.
228 template <>
230  static void deleteNode(MemoryAccess *MA) { MA->deleteValue(); }
231 };
232 
// Stream-insertion helper: delegates to MemoryAccess::print and returns the
// stream for chaining.
// NOTE(review): the operator<< signature line (doc line 233) is missing from
// this extraction — presumably "inline raw_ostream &operator<<(raw_ostream
// &OS, const MemoryAccess &MA) {"; confirm upstream.
234  MA.print(OS);
235  return OS;
236 }
237 
238 /// Class that has the common methods + fields of memory uses/defs. It's
239 /// a little awkward to have, but there are many cases where we want either a
240 /// use or def, and there are many cases where uses are needed (defs aren't
241 /// acceptable), and vice-versa.
242 ///
243 /// This class should never be instantiated directly; make a MemoryUse or
244 /// MemoryDef instead.
// NOTE(review): this extraction dropped several declaration lines inside this
// class: doc lines 249 (operand accessor macro), 269 (getOptimizedAccessType
// signature), 293 (setOptimizedAccessType signature), and 298 (the trailing
// "Optional<AliasResult> AR" default parameter of setDefiningAccess). Confirm
// exact signatures against the upstream header.
245 class MemoryUseOrDef : public MemoryAccess {
246 public:
// Abstract-ish base: not directly allocatable.
247  void *operator new(size_t) = delete;
248 
250 
251  /// Get the instruction that this MemoryUse represents.
252  Instruction *getMemoryInst() const { return MemoryInstruction; }
253 
254  /// Get the access that produces the memory state used by this Use.
255  MemoryAccess *getDefiningAccess() const { return getOperand(0); }
256 
257  static bool classof(const Value *MA) {
258  return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
259  }
260 
261  // Sadly, these have to be public because they are needed in some of the
262  // iterators.
263  inline bool isOptimized() const;
264  inline MemoryAccess *getOptimized() const;
265  inline void setOptimized(MemoryAccess *);
266 
267  // Retrieve AliasResult type of the optimized access. Ideally this would be
268  // returned by the caching walker and may go away in the future.
270  return OptimizedAccessAlias;
271  }
272 
273  /// Reset the ID of what this MemoryUse was optimized to, causing it to
274  /// be rewalked by the walker if necessary.
275  /// This really should only be called by tests.
276  inline void resetOptimized();
277 
278 protected:
279  friend class MemorySSA;
280  friend class MemorySSAUpdater;
281 
// Optimized-access alias kind starts conservatively at MayAlias.
282  MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
283  DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB,
284  unsigned NumOperands)
285  : MemoryAccess(C, Vty, DeleteValue, BB, NumOperands),
286  MemoryInstruction(MI), OptimizedAccessAlias(MayAlias) {
287  setDefiningAccess(DMA);
288  }
289 
290  // Use deleteValue() to delete a generic MemoryUseOrDef.
291  ~MemoryUseOrDef() = default;
292 
294  OptimizedAccessAlias = AR;
295  }
296 
// When Optimized is set, also records the alias kind via
// setOptimizedAccessType; otherwise just rewrites operand 0.
297  void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false,
299  if (!Optimized) {
300  setOperand(0, DMA);
301  return;
302  }
303  setOptimized(DMA);
304  setOptimizedAccessType(AR);
305  }
306 
307 private:
308  Instruction *MemoryInstruction;
309  Optional<AliasResult> OptimizedAccessAlias;
310 };
311 
312 /// Represents read-only accesses to memory
313 ///
314 /// In particular, the set of Instructions that will be represented by
315 /// MemoryUse's is exactly the set of Instructions for which
316 /// AliasAnalysis::getModRefInfo returns "Ref".
// NOTE(review): this extraction dropped doc lines 319-321 (operand accessor
// macro and the MemoryUse constructor's first signature line), 334
// (setOptimized signature), and 343 (getOptimized signature). Confirm exact
// signatures against the upstream header.
317 class MemoryUse final : public MemoryUseOrDef {
318 public:
320 
322  : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB,
323  /*NumOperands=*/1) {}
324 
325  // allocate space for exactly one operand
326  void *operator new(size_t s) { return User::operator new(s, 1); }
327 
328  static bool classof(const Value *MA) {
329  return MA->getValueID() == MemoryUseVal;
330  }
331 
332  void print(raw_ostream &OS) const;
333 
// Records the defining access's ID so isOptimized() can later detect whether
// the defining access has since been replaced.
335  OptimizedID = DMA->getID();
336  setOperand(0, DMA);
337  }
338 
339  bool isOptimized() const {
340  return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
341  }
342 
// For a MemoryUse the optimized access IS the defining access (operand 0).
344  return getDefiningAccess();
345  }
346 
347  void resetOptimized() {
348  OptimizedID = INVALID_MEMORYACCESS_ID;
349  }
350 
351 protected:
352  friend class MemorySSA;
353 
354 private:
355  static void deleteMe(DerivedUser *Self);
356 
357  unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
358 };
359 
// A MemoryUse has exactly one operand: its defining access (operand 0).
360 template <>
361 struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
363 
364 /// Represents a read-write access to memory, whether it is a must-alias,
365 /// or a may-alias.
366 ///
367 /// In particular, the set of Instructions that will be represented by
368 /// MemoryDef's is exactly the set of Instructions for which
369 /// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
370 /// Note that, in order to provide def-def chains, all defs also have a use
371 /// associated with them. This use points to the nearest reaching
372 /// MemoryDef/MemoryPhi.
// NOTE(review): this extraction dropped doc lines 377 (operand accessor
// macro), 379 (first line of the MemoryDef constructor signature), 392
// (setOptimized signature), and 397 (getOptimized signature). Confirm exact
// signatures against the upstream header.
373 class MemoryDef final : public MemoryUseOrDef {
374 public:
375  friend class MemorySSA;
376 
378 
380  unsigned Ver)
381  : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB,
382  /*NumOperands=*/2),
383  ID(Ver) {}
384 
385  // allocate space for exactly two operands
386  void *operator new(size_t s) { return User::operator new(s, 2); }
387 
388  static bool classof(const Value *MA) {
389  return MA->getValueID() == MemoryDefVal;
390  }
391 
// Unlike MemoryUse, the optimized access lives in a separate slot
// (operand 1), leaving operand 0 as the defining access.
393  setOperand(1, MA);
394  OptimizedID = MA->getID();
395  }
396 
// Operand 1 may be null when no optimized access has been recorded.
398  return cast_or_null<MemoryAccess>(getOperand(1));
399  }
400 
401  bool isOptimized() const {
402  return getOptimized() && OptimizedID == getOptimized()->getID();
403  }
404 
405  void resetOptimized() {
406  OptimizedID = INVALID_MEMORYACCESS_ID;
407  setOperand(1, nullptr);
408  }
409 
410  void print(raw_ostream &OS) const;
411 
412  unsigned getID() const { return ID; }
413 
414 private:
415  static void deleteMe(DerivedUser *Self);
416 
// Stable debugging ID assigned at construction; see MemoryAccess::getID().
417  const unsigned ID;
418  unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
419 };
420 
// A MemoryDef has two operands: the defining access (0) and the optionally
// recorded optimized access (1).
421 template <>
422 struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 2> {};
424 
// Dispatching operand traits: a MemoryUseOrDef is really either a MemoryUse
// (1 operand) or a MemoryDef (2 operands), so each query forwards to the
// concrete subclass's traits.
// NOTE(review): this extraction dropped doc lines 426 (the struct declaration
// line) and 429/435/441 (the MemoryUse-branch return statements). Confirm
// against the upstream header.
425 template <>
427  static Use *op_begin(MemoryUseOrDef *MUD) {
428  if (auto *MU = dyn_cast<MemoryUse>(MUD))
430  return OperandTraits<MemoryDef>::op_begin(cast<MemoryDef>(MUD));
431  }
432 
433  static Use *op_end(MemoryUseOrDef *MUD) {
434  if (auto *MU = dyn_cast<MemoryUse>(MUD))
436  return OperandTraits<MemoryDef>::op_end(cast<MemoryDef>(MUD));
437  }
438 
439  static unsigned operands(const MemoryUseOrDef *MUD) {
440  if (const auto *MU = dyn_cast<MemoryUse>(MUD))
442  return OperandTraits<MemoryDef>::operands(cast<MemoryDef>(MUD));
443  }
444 };
446 
447 /// Represents phi nodes for memory accesses.
448 ///
449 /// These have the same semantic as regular phi nodes, with the exception that
450 /// only one phi will ever exist in a given basic block.
451 /// Guaranteeing one phi per block means guaranteeing there is only ever one
452 /// valid reaching MemoryDef/MemoryPHI along each path to the phi node.
453 /// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
454 /// a MemoryPhi's operands.
455 /// That is, given
456 /// if (a) {
457 /// store %a
458 /// store %b
459 /// }
460 /// it *must* be transformed into
461 /// if (a) {
462 /// 1 = MemoryDef(liveOnEntry)
463 /// store %a
464 /// 2 = MemoryDef(1)
465 /// store %b
466 /// }
467 /// and *not*
468 /// if (a) {
469 /// 1 = MemoryDef(liveOnEntry)
470 /// store %a
471 /// 2 = MemoryDef(liveOnEntry)
472 /// store %b
473 /// }
474 /// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
475 /// end of the branch, and if there are not two phi nodes, one will be
476 /// disconnected completely from the SSA graph below that point.
477 /// Because MemoryUse's do not generate new definitions, they do not have this
478 /// issue.
// NOTE(review): this extraction dropped several declaration lines inside this
// class: doc lines 485 (operand accessor macro), 495-496 (block_iterator /
// const_block_iterator typedefs), 498/503 (block_begin signatures), 511
// (const block_end signature), 515/519 (blocks() signatures), 552
// (getIncomingBlock-from-iterator signature), 562 (addIncoming signature),
// 580 (getIncomingValueForBlock signature), and 615/622
// (unorderedDeleteIncomingBlock / unorderedDeleteIncomingValue signatures).
// Confirm exact signatures against the upstream header.
479 class MemoryPhi final : public MemoryAccess {
480  // allocate space for exactly zero operands
481  void *operator new(size_t s) { return User::operator new(s); }
482 
483 public:
484  /// Provide fast operand accessors
486 
// Operands are "hung off" (allocated separately), since the incoming count
// varies; ReservedSpace tracks the current allocation.
487  MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
488  : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver),
489  ReservedSpace(NumPreds) {
490  allocHungoffUses(ReservedSpace);
491  }
492 
493  // Block iterator interface. This provides access to the list of incoming
494  // basic blocks, which parallels the list of incoming values.
497 
// The incoming-block array is co-allocated just past the Use array; the
// reinterpret_cast arithmetic below locates it.
499  auto *Ref = reinterpret_cast<Use::UserRef *>(op_begin() + ReservedSpace);
500  return reinterpret_cast<block_iterator>(Ref + 1);
501  }
502 
504  const auto *Ref =
505  reinterpret_cast<const Use::UserRef *>(op_begin() + ReservedSpace);
506  return reinterpret_cast<const_block_iterator>(Ref + 1);
507  }
508 
509  block_iterator block_end() { return block_begin() + getNumOperands(); }
510 
512  return block_begin() + getNumOperands();
513  }
514 
516  return make_range(block_begin(), block_end());
517  }
518 
520  return make_range(block_begin(), block_end());
521  }
522 
523  op_range incoming_values() { return operands(); }
524 
525  const_op_range incoming_values() const { return operands(); }
526 
527  /// Return the number of incoming edges
528  unsigned getNumIncomingValues() const { return getNumOperands(); }
529 
530  /// Return incoming value number x
531  MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
532  void setIncomingValue(unsigned I, MemoryAccess *V) {
533  assert(V && "PHI node got a null value!");
534  setOperand(I, V);
535  }
536 
// Operand index and incoming-value index coincide for MemoryPhi (unlike
// IR PHINode, there are no interleaved block operands).
537  static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
538  static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }
539 
540  /// Return incoming basic block number @p i.
541  BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }
542 
543  /// Return incoming basic block corresponding
544  /// to an operand of the PHI.
545  BasicBlock *getIncomingBlock(const Use &U) const {
546  assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
547  return getIncomingBlock(unsigned(&U - op_begin()));
548  }
549 
550  /// Return incoming basic block corresponding
551  /// to value use iterator.
553  return getIncomingBlock(I.getUse());
554  }
555 
556  void setIncomingBlock(unsigned I, BasicBlock *BB) {
557  assert(BB && "PHI node got a null basic block!");
558  block_begin()[I] = BB;
559  }
560 
561  /// Add an incoming value to the end of the PHI list
563  if (getNumOperands() == ReservedSpace)
564  growOperands(); // Get more space!
565  // Initialize some new operands.
566  setNumHungOffUseOperands(getNumOperands() + 1);
567  setIncomingValue(getNumOperands() - 1, V);
568  setIncomingBlock(getNumOperands() - 1, BB);
569  }
570 
571  /// Return the first index of the specified basic
572  /// block in the value list for this PHI. Returns -1 if no instance.
573  int getBasicBlockIndex(const BasicBlock *BB) const {
574  for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
575  if (block_begin()[I] == BB)
576  return I;
577  return -1;
578  }
579 
581  int Idx = getBasicBlockIndex(BB);
582  assert(Idx >= 0 && "Invalid basic block argument!");
583  return getIncomingValue(Idx);
584  }
585 
586  // After deleting incoming position I, the order of incoming may be changed.
// Swap-with-last removal: O(1) but does not preserve incoming order.
587  void unorderedDeleteIncoming(unsigned I) {
588  unsigned E = getNumOperands();
589  assert(I < E && "Cannot remove out of bounds Phi entry.");
590  // MemoryPhi must have at least two incoming values, otherwise the MemoryPhi
591  // itself should be deleted.
592  assert(E >= 2 && "Cannot only remove incoming values in MemoryPhis with "
593  "at least 2 values.");
594  setIncomingValue(I, getIncomingValue(E - 1));
595  setIncomingBlock(I, block_begin()[E - 1]);
596  setOperand(E - 1, nullptr);
597  block_begin()[E - 1] = nullptr;
598  setNumHungOffUseOperands(getNumOperands() - 1);
599  }
600 
601  // After deleting entries that satisfy Pred, remaining entries may have
602  // changed order.
603  template <typename Fn> void unorderedDeleteIncomingIf(Fn &&Pred) {
604  for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
605  if (Pred(getIncomingValue(I), getIncomingBlock(I))) {
606  unorderedDeleteIncoming(I);
// Deletion moved the last entry into slot I, so re-read the bound and
// re-examine slot I on the next iteration.
607  E = getNumOperands();
608  --I;
609  }
610  assert(getNumOperands() >= 1 &&
611  "Cannot remove all incoming blocks in a MemoryPhi.");
612  }
613 
614  // After deleting incoming block BB, the incoming blocks order may be changed.
616  unorderedDeleteIncomingIf(
617  [&](const MemoryAccess *, const BasicBlock *B) { return BB == B; });
618  }
619 
620  // After deleting incoming memory access MA, the incoming accesses order may
621  // be changed.
623  unorderedDeleteIncomingIf(
624  [&](const MemoryAccess *M, const BasicBlock *) { return MA == M; });
625  }
626 
627  static bool classof(const Value *V) {
628  return V->getValueID() == MemoryPhiVal;
629  }
630 
631  void print(raw_ostream &OS) const;
632 
633  unsigned getID() const { return ID; }
634 
635 protected:
636  friend class MemorySSA;
637 
638  /// this is more complicated than the generic
639  /// User::allocHungoffUses, because we have to allocate Uses for the incoming
640  /// values and pointers to the incoming blocks, all in one allocation.
641  void allocHungoffUses(unsigned N) {
642  User::allocHungoffUses(N, /* IsPhi */ true);
643  }
644 
645 private:
646  // For debugging only
647  const unsigned ID;
648  unsigned ReservedSpace;
649 
650  /// This grows the operand list in response to a push_back style of
651  /// operation. This grows the number of ops by 1.5 times.
652  void growOperands() {
653  unsigned E = getNumOperands();
654  // 2 op PHI nodes are VERY common, so reserve at least enough for that.
655  ReservedSpace = std::max(E + E / 2, 2u);
656  growHungoffUses(ReservedSpace, /* IsPhi */ true);
657  }
658 
659  static void deleteMe(DerivedUser *Self);
660 };
661 
662 inline unsigned MemoryAccess::getID() const {
663  assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
664  "only memory defs and phis have ids");
665  if (const auto *MD = dyn_cast<MemoryDef>(this))
666  return MD->getID();
667  return cast<MemoryPhi>(this)->getID();
668 }
669 
670 inline bool MemoryUseOrDef::isOptimized() const {
671  if (const auto *MD = dyn_cast<MemoryDef>(this))
672  return MD->isOptimized();
673  return cast<MemoryUse>(this)->isOptimized();
674 }
675 
// Dispatch getOptimized() to the concrete subclass (MemoryDef or MemoryUse).
// NOTE(review): the signature line (doc line 676, presumably "inline
// MemoryAccess *MemoryUseOrDef::getOptimized() const {") is missing from
// this extraction.
677  if (const auto *MD = dyn_cast<MemoryDef>(this))
678  return MD->getOptimized();
679  return cast<MemoryUse>(this)->getOptimized();
680 }
681 
// Dispatch setOptimized() to the concrete subclass (MemoryDef or MemoryUse).
// NOTE(review): the signature line (doc line 682, presumably "inline void
// MemoryUseOrDef::setOptimized(MemoryAccess *MA) {") is missing from this
// extraction.
683  if (auto *MD = dyn_cast<MemoryDef>(this))
684  MD->setOptimized(MA);
685  else
686  cast<MemoryUse>(this)->setOptimized(MA);
687 }
688 
// Dispatch resetOptimized() to the concrete subclass (MemoryDef or MemoryUse).
// NOTE(review): the signature line (doc line 689, presumably "inline void
// MemoryUseOrDef::resetOptimized() {") is missing from this extraction.
690  if (auto *MD = dyn_cast<MemoryDef>(this))
691  MD->resetOptimized();
692  else
693  cast<MemoryUse>(this)->resetOptimized();
694 }
695 
// MemoryPhi has a variable number of operands, so it uses hung-off operand
// storage; the "2" is presumably a minimum-reserve hint — confirm against
// llvm/IR/OperandTraits.h.
696 template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
698 
699 /// Encapsulates MemorySSA, including all data associated with memory
700 /// accesses.
// NOTE(review): this extraction dropped several declaration lines inside this
// class: doc lines 703 (constructor), 713 (getMemoryAccess(const Instruction*)
// signature), 744/746 (AccessList typedef and DefsList right-hand side),
// 788-789 (friend declarations), 799/805 (getWritableBlockAccesses /
// getWritableBlockDefs signatures), 819 (renamePass Visited parameter line),
// 826 (insertIntoListsForBlock trailing parameter), 845-846 (AccessMap /
// DefsMap typedefs), and 860 (renamePass Visited parameter line). Confirm
// exact declarations against the upstream header.
701 class MemorySSA {
702 public:
704  ~MemorySSA();
705 
706  MemorySSAWalker *getWalker();
707  MemorySSAWalker *getSkipSelfWalker();
708 
709  /// Given a memory Mod/Ref'ing instruction, get the MemorySSA
710  /// access associated with it. If passed a basic block gets the memory phi
711  /// node that exists for that block, if there is one. Otherwise, this will get
712  /// a MemoryUseOrDef.
714  return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I));
715  }
716 
717  MemoryPhi *getMemoryAccess(const BasicBlock *BB) const {
718  return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
719  }
720 
721  void dump() const;
722  void print(raw_ostream &) const;
723 
724  /// Return true if \p MA represents the live on entry value
725  ///
726  /// Loads and stores from pointer arguments and other global values may be
727  /// defined by memory operations that do not occur in the current function, so
728  /// they may be live on entry to the function. MemorySSA represents such
729  /// memory state by the live on entry definition, which is guaranteed to occur
730  /// before any other memory access in the function.
731  inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
732  return MA == LiveOnEntryDef.get();
733  }
734 
735  inline MemoryAccess *getLiveOnEntryDef() const {
736  return LiveOnEntryDef.get();
737  }
738 
739  // Sadly, iplists, by default, owns and deletes pointers added to the
740  // list. It's not currently possible to have two iplists for the same type,
741  // where one owns the pointers, and one does not. This is because the traits
742  // are per-type, not per-tag. If this ever changes, we should make the
743  // DefList an iplist.
745  using DefsList =
747 
748  /// Return the list of MemoryAccess's for a given basic block.
749  ///
750  /// This list is not modifiable by the user.
751  const AccessList *getBlockAccesses(const BasicBlock *BB) const {
752  return getWritableBlockAccesses(BB);
753  }
754 
755  /// Return the list of MemoryDef's and MemoryPhi's for a given basic
756  /// block.
757  ///
758  /// This list is not modifiable by the user.
759  const DefsList *getBlockDefs(const BasicBlock *BB) const {
760  return getWritableBlockDefs(BB);
761  }
762 
763  /// Given two memory accesses in the same basic block, determine
764  /// whether MemoryAccess \p A dominates MemoryAccess \p B.
765  bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;
766 
767  /// Given two memory accesses in potentially different blocks,
768  /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
769  bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;
770 
771  /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
772  /// dominates Use \p B.
773  bool dominates(const MemoryAccess *A, const Use &B) const;
774 
775  /// Verify that MemorySSA is self consistent (IE definitions dominate
776  /// all uses, uses appear in the right places). This is used by unit tests.
777  void verifyMemorySSA() const;
778 
779  /// Check clobber sanity for an access.
780  void checkClobberSanityAccess(const MemoryAccess *MA) const;
781 
782  /// Used in various insertion functions to specify whether we are talking
783  /// about the beginning or end of a block.
784  enum InsertionPlace { Beginning, End };
785 
786 protected:
787  // Used by Memory SSA annotator, dumpers, and wrapper pass
790  friend class MemorySSAUpdater;
791 
792  void verifyDefUses(Function &F) const;
793  void verifyDomination(Function &F) const;
794  void verifyOrdering(Function &F) const;
795  void verifyDominationNumbers(const Function &F) const;
796  void verifyClobberSanity(const Function &F) const;
797 
798  // This is used by the use optimizer and updater.
800  auto It = PerBlockAccesses.find(BB);
801  return It == PerBlockAccesses.end() ? nullptr : It->second.get();
802  }
803 
804  // This is used by the use optimizer and updater.
806  auto It = PerBlockDefs.find(BB);
807  return It == PerBlockDefs.end() ? nullptr : It->second.get();
808  }
809 
810  // These are used by the updater to perform various internal MemorySSA
811  // machinations. They do not always leave the IR in a correct state, and
812  // rely on the updater to fixup what they break, so they are not public.
813 
814  void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
815  void moveTo(MemoryAccess *What, BasicBlock *BB, InsertionPlace Point);
816 
817  // Rename the dominator tree branch rooted at BB.
818  void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
820  renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
821  }
822 
823  void removeFromLookups(MemoryAccess *);
824  void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
825  void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
827  void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
828  AccessList::iterator);
829  MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *,
830  const MemoryUseOrDef *Template = nullptr);
831 
832 private:
833  class ClobberWalkerBase;
834  class CachingWalker;
835  class SkipSelfWalker;
836  class OptimizeUses;
837 
838  CachingWalker *getWalkerImpl();
839  void buildMemorySSA();
840  void optimizeUses();
841 
842  void prepareForMoveTo(MemoryAccess *, BasicBlock *);
843  void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;
844 
847 
848  void
849  determineInsertionPoint(const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks);
850  void markUnreachableAsLiveOnEntry(BasicBlock *BB);
851  bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const;
852  MemoryPhi *createMemoryPhi(BasicBlock *BB);
853  MemoryUseOrDef *createNewAccess(Instruction *,
854  const MemoryUseOrDef *Template = nullptr);
855  MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace);
856  void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &);
857  MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
858  void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
859  void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
861  bool SkipVisited = false, bool RenameAllUses = false);
862  AccessList *getOrCreateAccessList(const BasicBlock *);
863  DefsList *getOrCreateDefsList(const BasicBlock *);
864  void renumberBlock(const BasicBlock *) const;
865  AliasAnalysis *AA;
866  DominatorTree *DT;
867  Function &F;
868 
869  // Memory SSA mappings
870  DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;
871 
872  // These two mappings contain the main block to access/def mappings for
873  // MemorySSA. The list contained in PerBlockAccesses really owns all the
874  // MemoryAccesses.
875  // Both maps maintain the invariant that if a block is found in them, the
876  // corresponding list is not empty, and if a block is not found in them, the
877  // corresponding list is empty.
878  AccessMap PerBlockAccesses;
879  DefsMap PerBlockDefs;
880  std::unique_ptr<MemoryAccess, ValueDeleter> LiveOnEntryDef;
881 
882  // Domination mappings
883  // Note that the numbering is local to a block, even though the map is
884  // global.
885  mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
886  mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;
887 
888  // Memory SSA building info
889  std::unique_ptr<ClobberWalkerBase> WalkerBase;
890  std::unique_ptr<CachingWalker> Walker;
891  std::unique_ptr<SkipSelfWalker> SkipWalker;
892  unsigned NextID;
893 };
894 
895 // Internal MemorySSA utils, for use by MemorySSA classes and walkers
// NOTE(review): the class declaration line (doc line 896, presumably
// "class MemorySSAUtil {") is missing from this extraction.
897 protected:
898  friend class GVNHoist;
899  friend class MemorySSAWalker;
900 
901  // This function should not be used by new passes.
902  static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
903  AliasAnalysis &AA);
904 };
905 
906 // This pass does eager building and then printing of MemorySSA. It is used by
907 // the tests to be able to build, dump, and verify Memory SSA.
// NOTE(review): the class declaration line (doc line 908, presumably
// "class MemorySSAPrinterLegacyPass : public FunctionPass {") and the
// constructor (doc line 910) are missing from this extraction.
909 public:
911 
912  bool runOnFunction(Function &) override;
913  void getAnalysisUsage(AnalysisUsage &AU) const override;
914 
915  static char ID;
916 };
917 
918 /// An analysis that produces \c MemorySSA for a function.
919 ///
// NOTE(review): this extraction dropped doc line 921 (presumably the
// "friend AnalysisInfoMixin<MemorySSAAnalysis>;" declaration that grants
// access to the private Key) and doc line 937 (the run() declaration).
// Confirm against the upstream header.
920 class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
922 
923  static AnalysisKey Key;
924 
925 public:
926  // Wrap MemorySSA result to ensure address stability of internal MemorySSA
927  // pointers after construction. Use a wrapper class instead of plain
928  // unique_ptr<MemorySSA> to avoid build breakage on MSVC.
929  struct Result {
930  Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}
931 
932  MemorySSA &getMSSA() { return *MSSA.get(); }
933 
934  std::unique_ptr<MemorySSA> MSSA;
935  };
936 
938 };
939 
940 /// Printer pass for \c MemorySSA.
// NOTE(review): the run() declaration (doc line 947, presumably
// "PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);") is
// missing from this extraction.
941 class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
// Output stream supplied by the caller; the pass only borrows it.
942  raw_ostream &OS;
943 
944 public:
945  explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {}
946 
948 };
949 
950 /// Verifier pass for \c MemorySSA.
// NOTE(review): the run() declaration (doc line 952) is missing from this
// extraction.
951 struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
953 };
954 
955 /// Legacy analysis pass which computes \c MemorySSA.
// NOTE(review): the class declaration line (doc line 956, presumably
// "class MemorySSAWrapperPass : public FunctionPass {") and the constructor
// (doc line 958) are missing from this extraction.
957 public:
959 
960  static char ID;
961 
962  bool runOnFunction(Function &) override;
963  void releaseMemory() override;
964  MemorySSA &getMSSA() { return *MSSA; }
965  const MemorySSA &getMSSA() const { return *MSSA; }
966 
967  void getAnalysisUsage(AnalysisUsage &AU) const override;
968 
969  void verifyAnalysis() const override;
970  void print(raw_ostream &OS, const Module *M = nullptr) const override;
971 
972 private:
// Owns the MemorySSA built in runOnFunction; released in releaseMemory().
973  std::unique_ptr<MemorySSA> MSSA;
974 };
975 
976 /// This is the generic walker interface for walkers of MemorySSA.
977 /// Walkers are used to be able to further disambiguate the def-use chains
978 /// MemorySSA gives you, or otherwise produce better info than MemorySSA gives
979 /// you.
980 /// In particular, while the def-use chains provide basic information, and are
981 /// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
982 /// MemoryUse as AliasAnalysis considers it, a user may want better or other
983 /// information. In particular, they may want to use SCEV info to further
984 /// disambiguate memory accesses, or they may want the nearest dominating
985 /// may-aliasing MemoryDef for a call or a store. This API enables a
986 /// standardized interface to getting and using that info.
988 public:
990  virtual ~MemorySSAWalker() = default;
991 
993 
994  /// Given a memory Mod/Ref/ModRef'ing instruction, calling this
995  /// will give you the nearest dominating MemoryAccess that Mod's the location
996  /// the instruction accesses (by skipping any def which AA can prove does not
997  /// alias the location(s) accessed by the instruction given).
998  ///
999  /// Note that this will return a single access, and it must dominate the
1000  /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
1001  /// this will return the MemoryPhi, not the operand. This means that
1002  /// given:
1003  /// if (a) {
1004  /// 1 = MemoryDef(liveOnEntry)
1005  /// store %a
1006  /// } else {
1007  /// 2 = MemoryDef(liveOnEntry)
1008  /// store %b
1009  /// }
1010  /// 3 = MemoryPhi(2, 1)
1011  /// MemoryUse(3)
1012  /// load %a
1013  ///
1014  /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
1015  /// in the if (a) branch.
1017  MemoryAccess *MA = MSSA->getMemoryAccess(I);
1018  assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
1019  return getClobberingMemoryAccess(MA);
1020  }
1021 
1022  /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
1023  /// but takes a MemoryAccess instead of an Instruction.
1024  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;
1025 
1026  /// Given a potentially clobbering memory access and a new location,
1027  /// calling this will give you the nearest dominating clobbering MemoryAccess
1028  /// (by skipping non-aliasing def links).
1029  ///
1030  /// This version of the function is mainly used to disambiguate phi translated
1031  /// pointers, where the value of a pointer may have changed from the initial
1032  /// memory access. Note that this expects to be handed either a MemoryUse,
1033  /// or an already potentially clobbering access. Unlike the above API, if
1034  /// given a MemoryDef that clobbers the pointer as the starting access, it
1035  /// will return that MemoryDef, whereas the above would return the clobber
1036  /// starting from the use side of the memory def.
1037  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
1038  const MemoryLocation &) = 0;
1039 
1040  /// Given a memory access, invalidate anything this walker knows about
1041  /// that access.
1042  /// This API is used by walkers that store information to perform basic cache
1043  /// invalidation. This will be called by MemorySSA at appropriate times for
1044  /// the walker it uses or returns.
1045  virtual void invalidateInfo(MemoryAccess *) {}
1046 
1047  virtual void verify(const MemorySSA *MSSA) { assert(MSSA == this->MSSA); }
1048 
1049 protected:
1050  friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
1051  // constructor.
1053 };
1054 
1055 /// A MemorySSAWalker that does no alias queries, or anything else. It
1056 /// simply returns the links as they were constructed by the builder.
1058 public:
1059  // Keep the overrides below from hiding the Instruction overload of
1060  // getClobberingMemoryAccess.
1062 
1063  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
1064  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
1065  const MemoryLocation &) override;
1066 };
1067 
1068 using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
1069 using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;
1070 
1071 /// Iterator base class used to implement const and non-const iterators
1072 /// over the defining accesses of a MemoryAccess.
1073 template <class T>
1075  : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
1076  std::forward_iterator_tag, T, ptrdiff_t, T *,
1077  T *> {
1078  using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;
1079 
1080 public:
1081  memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
1082  memoryaccess_def_iterator_base() = default;
1083 
  // Two iterators are equal when they reference the same access and — for a
  // non-null Access, i.e. when iterating a MemoryPhi's incoming values — the
  // same argument slot. A null Access is the end sentinel, in which case
  // ArgNo is deliberately ignored.
1084  bool operator==(const memoryaccess_def_iterator_base &Other) const {
1085  return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
1086  }
1087 
1088  // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the
1089  // block from the operand in constant time (In a PHINode, the uselist has
1090  // both, so it's just subtraction). We provide it as part of the
1091  // iterator to avoid callers having to linear walk to get the block.
1092  // If the operation becomes constant time on MemoryPHI's, this bit of
1093  // abstraction breaking should be removed.
1095  MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
1096  assert(MP && "Tried to get phi arg block when not iterating over a PHI");
1097  return MP->getIncomingBlock(ArgNo);
1098  }
1099 
  // Dereference: for a MemoryPhi, yield the incoming value at the current
  // argument position (ArgNo); for a MemoryUse/MemoryDef, yield its single
  // defining access. Asserts rather than returning null when called on the
  // end sentinel (Access == nullptr).
1100  typename BaseT::iterator::pointer operator*() const {
1101  assert(Access && "Tried to access past the end of our iterator");
1102  // Go to the first argument for phis, and the defining access for everything
1103  // else.
1104  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
1105  return MP->getIncomingValue(ArgNo);
1106  return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
1107  }
1108 
1109  using BaseT::operator++;
1111  assert(Access && "Hit end of iterator");
1112  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
1113  if (++ArgNo >= MP->getNumIncomingValues()) {
1114  ArgNo = 0;
1115  Access = nullptr;
1116  }
1117  } else {
1118  Access = nullptr;
1119  }
1120  return *this;
1121  }
1122 
1123 private:
1124  T *Access = nullptr;
1125  unsigned ArgNo = 0;
1126 };
1127 
1129  return memoryaccess_def_iterator(this);
1130 }
1131 
1133  return const_memoryaccess_def_iterator(this);
1134 }
1135 
1137  return memoryaccess_def_iterator();
1138 }
1139 
1142 }
1143 
1144 /// GraphTraits for a MemoryAccess, which walks defs in the normal case,
1145 /// and uses in the inverse case.
1146 template <> struct GraphTraits<MemoryAccess *> {
1149 
1150  static NodeRef getEntryNode(NodeRef N) { return N; }
1152  static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
1153 };
1154 
1155 template <> struct GraphTraits<Inverse<MemoryAccess *>> {
1158 
1159  static NodeRef getEntryNode(NodeRef N) { return N; }
1161  static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
1162 };
1163 
1164 /// Provide an iterator that walks defs, giving both the memory access,
1165 /// and the current pointer location, updating the pointer location as it
1166 /// changes due to phi node translation.
1167 ///
1168 /// This iterator, while somewhat specialized, is what most clients actually
1169 /// want when walking upwards through MemorySSA def chains. It takes a pair of
1170 /// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
1171 /// memory location through phi nodes for the user.
1173  : public iterator_facade_base<upward_defs_iterator,
1174  std::forward_iterator_tag,
1175  const MemoryAccessPair> {
1176  using BaseT = upward_defs_iterator::iterator_facade_base;
1177 
1178 public:
1180  : DefIterator(Info.first), Location(Info.second),
1181  OriginalAccess(Info.first) {
1182  CurrentPair.first = nullptr;
1183 
1184  WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
1185  fillInCurrentPair();
1186  }
1187 
1188  upward_defs_iterator() { CurrentPair.first = nullptr; }
1189 
  // Equality is defined solely by the underlying def iterator. CurrentPair,
  // Location, and WalkingPhi are derived/cached state and are intentionally
  // not compared — this is what lets a live iterator compare equal to a
  // default-constructed end iterator once DefIterator reaches its end.
1190  bool operator==(const upward_defs_iterator &Other) const {
1191  return DefIterator == Other.DefIterator;
1192  }
1193 
  // Dereference: return the cached <MemoryAccess, MemoryLocation> pair, which
  // fillInCurrentPair() refreshed when the iterator last advanced. Asserts
  // when dereferenced at/past the end of the defining accesses.
1194  BaseT::iterator::reference operator*() const {
1195  assert(DefIterator != OriginalAccess->defs_end() &&
1196  "Tried to access past the end of our iterator");
1197  return CurrentPair;
1198  }
1199 
1200  using BaseT::operator++;
1202  assert(DefIterator != OriginalAccess->defs_end() &&
1203  "Tried to access past the end of the iterator");
1204  ++DefIterator;
1205  if (DefIterator != OriginalAccess->defs_end())
1206  fillInCurrentPair();
1207  return *this;
1208  }
1209 
1210  BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }
1211 
1212 private:
  // Refresh CurrentPair for the access DefIterator now points at.
  // When walking a MemoryPhi's incoming edges with a concrete pointer
  // location, attempt to phi-translate the pointer into the incoming block
  // (the pointer value may differ per predecessor); on a successful
  // translation that produced a different pointer, record the location with
  // the translated pointer. In every other case fall through and keep the
  // original Location unchanged.
1213  void fillInCurrentPair() {
1214  CurrentPair.first = *DefIterator;
1215  if (WalkingPhi && Location.Ptr) {
1216  PHITransAddr Translator(
1217  const_cast<Value *>(Location.Ptr),
1218  OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
  // NOTE(review): PHITranslateValue appears to return false on success (the
  // translated address is then read via getAddr()) — confirm against
  // PHITransAddr.h before relying on this.
1219  if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
1220  DefIterator.getPhiArgBlock(), nullptr,
1221  false))
1222  if (Translator.getAddr() != Location.Ptr) {
1223  CurrentPair.second = Location.getWithNewPtr(Translator.getAddr());
1224  return;
1225  }
1226  }
  // Default: no phi walking, no pointer, or translation failed/identical.
1227  CurrentPair.second = Location;
1228  }
1229 
1230  MemoryAccessPair CurrentPair;
1231  memoryaccess_def_iterator DefIterator;
1232  MemoryLocation Location;
1233  MemoryAccess *OriginalAccess = nullptr;
1234  bool WalkingPhi = false;
1235 };
1236 
1238  return upward_defs_iterator(Pair);
1239 }
1240 
1242 
1245  return make_range(upward_defs_begin(Pair), upward_defs_end());
1246 }
1247 
1248 /// Walks the defining accesses of MemoryDefs. Stops after we hit something that
1249 /// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when
1250 /// comparing against a null def_chain_iterator, this will compare equal only
1251 /// after walking said Phi/liveOnEntry.
1252 ///
1253 /// The UseOptimizedChain flag specifies whether to walk the clobbering
1254 /// access chain, or all the accesses.
1255 ///
1256 /// Normally, MemoryDef are all just def/use linked together, so a def_chain on
1257 /// a MemoryDef will walk all MemoryDefs above it in the program until it hits
1258 /// a phi node. The optimized chain walks the clobbering access of a store.
1259 /// So if you are just trying to find, given a store, what the next
1260 /// thing that would clobber the same memory is, you want the optimized chain.
1261 template <class T, bool UseOptimizedChain = false>
1263  : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
1264  std::forward_iterator_tag, MemoryAccess *> {
1265  def_chain_iterator() : MA(nullptr) {}
1266  def_chain_iterator(T MA) : MA(MA) {}
1267 
1268  T operator*() const { return MA; }
1269 
1271  // N.B. liveOnEntry has a null defining access.
1272  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
1273  if (UseOptimizedChain && MUD->isOptimized())
1274  MA = MUD->getOptimized();
1275  else
1276  MA = MUD->getDefiningAccess();
1277  } else {
1278  MA = nullptr;
1279  }
1280 
1281  return *this;
1282  }
1283 
1284  bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }
1285 
1286 private:
1287  T MA;
1288 };
1289 
1290 template <class T>
1292 def_chain(T MA, MemoryAccess *UpTo = nullptr) {
1293 #ifdef EXPENSIVE_CHECKS
1294  assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
1295  "UpTo isn't in the def chain!");
1296 #endif
1298 }
1299 
1300 template <class T>
1303  def_chain_iterator<T, true>(nullptr));
1304 }
1305 
1306 } // end namespace llvm
1307 
1308 #endif // LLVM_ANALYSIS_MEMORYSSA_H
uint64_t CallInst * C
AccessList * getWritableBlockAccesses(const BasicBlock *BB) const
Definition: MemorySSA.h:799
memoryaccess_def_iterator & operator++()
Definition: MemorySSA.h:1110
BasicBlock * getIncomingBlock(MemoryAccess::const_user_iterator I) const
Return incoming basic block corresponding to value use iterator.
Definition: MemorySSA.h:552
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
unsigned getValueID() const
Return an ID for the concrete type of this object.
Definition: Value.h:464
virtual void verify(const MemorySSA *MSSA)
Definition: MemorySSA.h:1047
void unorderedDeleteIncomingValue(const MemoryAccess *MA)
Definition: MemorySSA.h:622
This class represents lattice values for constants.
Definition: AllocatorList.h:24
void unorderedDeleteIncomingIf(Fn &&Pred)
Definition: MemorySSA.h:603
Result(std::unique_ptr< MemorySSA > &&MSSA)
Definition: MemorySSA.h:930
AllAccessType::self_iterator getIterator()
Get the iterators for the all access list and the defs only list We default to the all access list...
Definition: MemorySSA.h:176
BasicBlock * getIncomingBlock(const Use &U) const
Return incoming basic block corresponding to an operand of the PHI.
Definition: MemorySSA.h:545
MemoryAccess * getDefiningAccess() const
Get the access that produces the memory state used by this Use.
Definition: MemorySSA.h:255
MemorySSAPrinterPass(raw_ostream &OS)
Definition: MemorySSA.h:945
A Module instance is used to store all the information related to an LLVM module. ...
Definition: Module.h:65
void resetOptimized()
Definition: MemorySSA.h:347
const MemorySSA & getMSSA() const
Definition: MemorySSA.h:965
const AccessList * getBlockAccesses(const BasicBlock *BB) const
Return the list of MemoryAccess's for a given basic block.
Definition: MemorySSA.h:751
BasicBlock *const * const_block_iterator
Definition: MemorySSA.h:496
This provides a very simple, boring adaptor for a begin and end iterator into a range type...
MemoryAccess * getOptimized() const
Definition: MemorySSA.h:397
Extension point for the Value hierarchy.
Definition: DerivedUser.h:28
Represents a read-write access to memory, whether it is a must-alias, or a may-alias.
Definition: MemorySSA.h:373
BaseT::iterator::reference operator*() const
Definition: MemorySSA.h:1194
void setOptimized(MemoryAccess *)
Definition: MemorySSA.h:682
void deleteValue()
Delete a pointer to a generic Value.
Definition: Value.cpp:98
BasicBlock * getPhiArgBlock() const
Definition: MemorySSA.h:1094
memoryaccess_def_iterator defs_begin()
This iterator walks over all of the defs in a given MemoryAccess.
Definition: MemorySSA.h:1128
unsigned second
F(f)
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
Definition: MemorySSA.h:573
void(*)(DerivedUser *) DeleteValueTy
Definition: DerivedUser.h:30
This defines the Use class.
AllAccessType::const_self_iterator getIterator() const
Definition: MemorySSA.h:179
DefsOnlyType::self_iterator getDefsIterator()
Definition: MemorySSA.h:188
void setIncomingValue(unsigned I, MemoryAccess *V)
Definition: MemorySSA.h:532
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:344
MemoryPhi * getMemoryAccess(const BasicBlock *BB) const
Definition: MemorySSA.h:717
Represents read-only accesses to memory.
Definition: MemorySSA.h:317
This class is a batch walker of all MemoryUse's in the program, and points their defining access at t...
Definition: MemorySSA.cpp:1194
Legacy analysis pass which computes MemorySSA.
Definition: MemorySSA.h:956
Definition: BitVector.h:938
void resetOptimized()
Reset the ID of what this MemoryUse was optimized to, causing it to be rewalked by the walker if nece...
Definition: MemorySSA.h:689
void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal, SmallPtrSetImpl< BasicBlock *> &Visited)
Definition: MemorySSA.h:818
block_iterator block_begin()
Definition: MemorySSA.h:498
A MemorySSAWalker that does AA walks to disambiguate accesses.
Definition: MemorySSA.cpp:973
A Use represents the edge between a Value definition and its users.
Definition: Use.h:56
Encapsulates MemorySSA, including all data associated with memory accesses.
Definition: MemorySSA.h:701
The access may reference the value stored in memory.
const DefsList * getBlockDefs(const BasicBlock *BB) const
Return the list of MemoryDef&#39;s and MemoryPhi&#39;s for a given basic block.
Definition: MemorySSA.h:759
const_block_iterator block_end() const
Definition: MemorySSA.h:511
bool isOptimized() const
Definition: MemorySSA.h:401
Walks the defining accesses of MemoryDefs.
Definition: MemorySSA.h:1262
def_chain_iterator & operator++()
Definition: MemorySSA.h:1270
bool isOptimized() const
Definition: MemorySSA.h:670
BaseT::iterator::pointer operator*() const
Definition: MemorySSA.h:1100
static bool classof(const Value *MA)
Definition: MemorySSA.h:388
iterator_range< block_iterator > blocks()
Definition: MemorySSA.h:515
A simple intrusive list implementation.
Definition: simple_ilist.h:79
Key
PAL metadata keys.
#define DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CLASS, VALUECLASS)
Macro for generating out-of-class operand accessor definitions.
User * getUser() const LLVM_READONLY
Returns the User that contains this Use.
Definition: Use.cpp:41
static ChildIteratorType child_end(NodeRef N)
Definition: MemorySSA.h:1161
static int getID(struct InternalInstruction *insn, const void *miiArg)
MemoryUseOrDef * getMemoryAccess(const Instruction *I) const
Given a memory Mod/Ref&#39;ing instruction, get the MemorySSA access associated with it.
Definition: MemorySSA.h:713
MemoryAccess * getOptimized() const
Definition: MemorySSA.h:343
AllAccessType::const_reverse_self_iterator getReverseIterator() const
Definition: MemorySSA.h:185
upward_defs_iterator upward_defs_end()
Definition: MemorySSA.h:1241
std::unique_ptr< MemorySSA > MSSA
Definition: MemorySSA.h:934
void unorderedDeleteIncoming(unsigned I)
Definition: MemorySSA.h:587
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition: PassManager.h:366
This is the generic walker interface for walkers of MemorySSA.
Definition: MemorySSA.h:987
CRTP base class which implements the entire standard iterator facade in terms of a minimal subset of ...
Definition: iterator.h:68
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree...
Definition: Dominators.h:145
op_range incoming_values()
Definition: MemorySSA.h:523
memoryaccess_def_iterator defs_end()
Definition: MemorySSA.h:1136
Analysis containing CSE Info
Definition: CSEInfo.cpp:21
unsigned getID() const
Definition: MemorySSA.h:633
MemoryAccess * getIncomingValue(unsigned I) const
Return incoming value number x.
Definition: MemorySSA.h:531
const_block_iterator block_begin() const
Definition: MemorySSA.h:503
An assembly annotator class to print Memory SSA information in comments.
Definition: MemorySSA.cpp:93
Use delete by default for iplist and ilist.
Definition: ilist.h:41
static bool runOnFunction(Function &F, bool PostInlining)
static Use * op_end(MemoryUseOrDef *MUD)
Definition: MemorySSA.h:433
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:154
DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const
Definition: MemorySSA.h:197
AllAccessType::reverse_self_iterator getReverseIterator()
Definition: MemorySSA.h:182
LLVM Basic Block Representation.
Definition: BasicBlock.h:58
PointerIntPair - This class implements a pair of a pointer and small integer.
PHITransAddr - An address value which tracks and handles phi translation.
Definition: PHITransAddr.h:36
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:46
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:69
const_op_range incoming_values() const
Definition: MemorySSA.h:525
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
upward_defs_iterator & operator++()
Definition: MemorySSA.h:1201
void addIncoming(MemoryAccess *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
Definition: MemorySSA.h:562
early cse Early CSE w MemorySSA
Definition: EarlyCSE.cpp:1334
A CRTP mix-in that provides informational APIs needed for analysis passes.
Definition: PassManager.h:383
static unsigned getIncomingValueNumForOperand(unsigned I)
Definition: MemorySSA.h:538
void setIncomingBlock(unsigned I, BasicBlock *BB)
Definition: MemorySSA.h:556
upward_defs_iterator(const MemoryAccessPair &Info)
Definition: MemorySSA.h:1179
InsertionPlace
Used in various insertion functions to specify whether we are talking about the beginning or end of a...
Definition: MemorySSA.h:784
Represent the analysis usage information of a pass.
iterator_range< def_chain_iterator< T, true > > optimized_def_chain(T MA)
Definition: MemorySSA.h:1301
void print(raw_ostream &OS) const
Definition: MemorySSA.cpp:2058
Printer pass for MemorySSA.
Definition: MemorySSA.h:941
static unsigned getOperandNumForIncomingValue(unsigned I)
Definition: MemorySSA.h:537
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:285
static bool classof(const Value *MA)
Definition: MemorySSA.h:328
static void print(raw_ostream &Out, object::Archive::Kind Kind, T Val)
static unsigned operands(const MemoryUseOrDef *MUD)
Definition: MemorySSA.h:439
BasicBlock * getPhiArgBlock() const
Definition: MemorySSA.h:1210
virtual void invalidateInfo(MemoryAccess *)
Given a memory access, invalidate anything this walker knows about that access.
Definition: MemorySSA.h:1045
Iterator base class used to implement const and non-const iterators over the defining accesses of a M...
Definition: MemorySSA.h:128
#define DECLARE_TRANSPARENT_OPERAND_ACCESSORS(VALUECLASS)
Macro for generating in-class operand accessor declarations.
Provide an iterator that walks defs, giving both the memory access, and the current pointer location...
Definition: MemorySSA.h:1172
auto find(R &&Range, const T &Val) -> decltype(adl_begin(Range))
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1207
An intrusive list with ownership and callbacks specified/controlled by ilist_traits, only with API safe for polymorphic types.
Definition: ilist.h:390
MemoryAccess * getOptimized() const
Definition: MemorySSA.h:676
A MemorySSAWalker that does no alias queries, or anything else.
Definition: MemorySSA.h:1057
std::pair< const MemoryAccess *, MemoryLocation > ConstMemoryAccessPair
Definition: MemorySSA.h:1069
static void deleteNode(MemoryAccess *MA)
Definition: MemorySSA.h:230
unsigned first
bool operator==(const memoryaccess_def_iterator_base &Other) const
Definition: MemorySSA.h:1084
bool isOptimized() const
Definition: MemorySSA.h:339
The two locations may or may not alias. This is the least precise result.
Definition: AliasAnalysis.h:86
Representation for a specific memory location.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void setDefiningAccess(MemoryAccess *DMA, bool Optimized=false, Optional< AliasResult > AR=MayAlias)
Definition: MemorySSA.h:297
Iterator for intrusive lists based on ilist_node.
static bool classof(const Value *V)
Definition: MemorySSA.h:627
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:418
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:847
Module.h This file contains the declarations for the Module class.
An analysis that produces MemorySSA for a function.
Definition: MemorySSA.h:920
BasicBlock * getBlock() const
Definition: MemorySSA.h:157
void unorderedDeleteIncomingBlock(const BasicBlock *BB)
Definition: MemorySSA.h:615
MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty, DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB, unsigned NumOperands)
Definition: MemorySSA.h:282
MemoryAccess * getLiveOnEntryDef() const
Definition: MemorySSA.h:735
Verifier pass for MemorySSA.
Definition: MemorySSA.h:951
A range adaptor for a pair of iterators.
MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds=0)
Definition: MemorySSA.h:487
static Use * op_begin(MemoryUseOrDef *MUD)
Definition: MemorySSA.h:427
Class that has the common methods + fields of memory uses/defs.
Definition: MemorySSA.h:245
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Definition: MemorySSA.h:528
BasicBlock * getIncomingBlock(unsigned I) const
Return incoming basic block number i.
Definition: MemorySSA.h:541
Instruction * getMemoryInst() const
Get the instruction that this MemoryUse represents.
Definition: MemorySSA.h:252
iterator_range< const_block_iterator > blocks() const
Definition: MemorySSA.h:519
user_iterator_impl< const User > const_user_iterator
Definition: Value.h:370
DefsOnlyType::const_self_iterator getDefsIterator() const
Definition: MemorySSA.h:191
void setOptimizedAccessType(Optional< AliasResult > AR)
Definition: MemorySSA.h:293
iterator_range< def_chain_iterator< T > > def_chain(T MA, MemoryAccess *UpTo=nullptr)
Definition: MemorySSA.h:1292
memoryaccess_def_iterator_base< MemoryAccess > memoryaccess_def_iterator
Definition: MemorySSA.h:129
block_iterator block_end()
Definition: MemorySSA.h:509
bool operator==(const def_chain_iterator &O) const
Definition: MemorySSA.h:1284
This file provides utility analysis objects describing memory locations.
#define I(x, y, z)
Definition: MD5.cpp:58
#define N
user_iterator_impl< User > user_iterator
Definition: Value.h:369
MemoryAccess * getIncomingValueForBlock(const BasicBlock *BB) const
Definition: MemorySSA.h:580
Compile-time customization of User operands.
Definition: User.h:43
MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue, BasicBlock *BB, unsigned NumOperands)
Definition: MemorySSA.h:216
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:323
bool isLiveOnEntryDef(const MemoryAccess *MA) const
Return true if MA represents the live on entry value.
Definition: MemorySSA.h:731
Optional< AliasResult > getOptimizedAccessType() const
Definition: MemorySSA.h:269
raw_ostream & operator<<(raw_ostream &OS, const APInt &I)
Definition: APInt.h:2039
static ChildIteratorType child_begin(NodeRef N)
Definition: MemorySSA.h:1160
MemoryAccess * getClobberingMemoryAccess(const Instruction *I)
Given a memory Mod/Ref/ModRef&#39;ing instruction, calling this will give you the nearest dominating Memo...
Definition: MemorySSA.h:1016
static ChildIteratorType child_begin(NodeRef N)
Definition: MemorySSA.h:1151
static bool classof(const Value *MA)
Definition: MemorySSA.h:257
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
user_iterator user_begin()
Definition: Value.h:376
void setOptimized(MemoryAccess *MA)
Definition: MemorySSA.h:392
static NodeRef getEntryNode(NodeRef N)
Definition: MemorySSA.h:1150
LLVM Value Representation.
Definition: Value.h:73
MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB, unsigned Ver)
Definition: MemorySSA.h:379
DefsOnlyType::reverse_self_iterator getReverseDefsIterator()
Definition: MemorySSA.h:194
upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair)
Definition: MemorySSA.h:1237
HungoffOperandTraits - determine the allocation regime of the Use array when it is not a prefix to th...
Definition: OperandTraits.h:96
void allocHungoffUses(unsigned N, bool IsPhi=false)
Allocate the array of Uses, followed by a pointer (with bottom bit set) to the User.
Definition: User.cpp:40
unsigned getID() const
Used for debugging and tracking things about MemoryAccesses.
Definition: MemorySSA.h:662
This class implements an extremely fast bulk output stream that can only output to a stream...
Definition: raw_ostream.h:46
const_user_iterator const_iterator
Definition: MemorySSA.h:164
IRTranslator LLVM IR MI
FixedNumOperandTraits - determine the allocation regime of the Use array when it is a prefix to the U...
Definition: OperandTraits.h:31
A container for analyses that lazily runs them and caches their results.
void setBlock(BasicBlock *BB)
Used by MemorySSA to change the block of a MemoryAccess when it is moved.
Definition: MemorySSA.h:210
MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
Definition: MemorySSA.h:321
static bool classof(const Value *V)
Definition: MemorySSA.h:152
iterator_range< upward_defs_iterator > upward_defs(const MemoryAccessPair &Pair)
Definition: MemorySSA.h:1244
Represents phi nodes for memory accesses.
Definition: MemorySSA.h:479
unsigned getID() const
Definition: MemorySSA.h:412
static ChildIteratorType child_end(NodeRef N)
Definition: MemorySSA.h:1152
DefsList * getWritableBlockDefs(const BasicBlock *BB) const
Definition: MemorySSA.h:805
user_iterator iterator
The user iterators for a memory access.
Definition: MemorySSA.h:163
memoryaccess_def_iterator_base< const MemoryAccess > const_memoryaccess_def_iterator
Definition: MemorySSA.h:131
bool operator==(const upward_defs_iterator &Other) const
Definition: MemorySSA.h:1190
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition: PassManager.h:71
void resetOptimized()
Definition: MemorySSA.h:405
void setOptimized(MemoryAccess *DMA)
Definition: MemorySSA.h:334
std::pair< MemoryAccess *, MemoryLocation > MemoryAccessPair
Definition: MemorySSA.h:1068
void allocHungoffUses(unsigned N)
this is more complicated than the generic User::allocHungoffUses, because we have to allocate Uses fo...
Definition: MemorySSA.h:641
user_iterator user_end()
Definition: Value.h:384