//===- DeadStoreElimination.cpp - Fast Dead Store Elimination -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a trivial dead store elimination that only considers
// basic-block local redundant stores.
//
// FIXME: This should eventually be extended to be a post-dominator tree
// traversal.  Doing so would be pretty trivial.
//
//===----------------------------------------------------------------------===//
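
// Example (an illustrative sketch, not part of the original file; %p is a
// hypothetical pointer): the core transformation this pass performs is
// deleting a store that is completely overwritten before it can be read:
//
//   store i32 1, i32* %p   ; dead: overwritten below with no intervening read
//   store i32 2, i32* %p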

#include "llvm/Transforms/Scalar/DeadStoreElimination.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <map>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "dse"

STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther, "Number of other instrs removed");
STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
STATISTIC(NumModifiedStores, "Number of stores modified");

static cl::opt<bool>
EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
                               cl::init(true), cl::Hidden,
                               cl::desc("Enable partial-overwrite tracking in DSE"));

static cl::opt<bool>
EnablePartialStoreMerging("enable-dse-partial-store-merging",
                          cl::init(true), cl::Hidden,
                          cl::desc("Enable partial store merging in DSE"));

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//
using OverlapIntervalsTy = std::map<int64_t, int64_t>;
using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;

/// Delete this instruction.  Before we do, go through and zero out all the
/// operands of this instruction.  If any of them become dead, delete them and
/// the computation tree that feeds them.
/// If ValueSet is non-null, remove any deleted instructions from it as well.
static void
deleteDeadInstruction(Instruction *I, BasicBlock::iterator *BBI,
                      MemoryDependenceResults &MD, const TargetLibraryInfo &TLI,
                      InstOverlapIntervalsTy &IOL,
                      DenseMap<Instruction*, size_t> *InstrOrdering,
                      SmallSetVector<Value *, 16> *ValueSet = nullptr) {
  SmallVector<Instruction*, 32> NowDeadInsts;

  NowDeadInsts.push_back(I);
  --NumFastOther;

  // Keeping the iterator straight is a pain, so we let this routine tell the
  // caller what the next instruction is after we're done mucking about.
  BasicBlock::iterator NewIter = *BBI;

  // Before we touch this instruction, remove it from memdep!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();
    ++NumFastOther;

    // Try to preserve debug information attached to the dead instruction.
    salvageDebugInfo(*DeadInst);

    // This instruction is dead, zap it, in stages.  Start by removing it from
    // MemDep, which needs to know the operands and needs it to be in the
    // function.
    MD.removeInstruction(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, nullptr);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI, &TLI))
          NowDeadInsts.push_back(OpI);
    }

    if (ValueSet) ValueSet->remove(DeadInst);
    InstrOrdering->erase(DeadInst);
    IOL.erase(DeadInst);

    if (NewIter == DeadInst->getIterator())
      NewIter = DeadInst->eraseFromParent();
    else
      DeadInst->eraseFromParent();
  } while (!NowDeadInsts.empty());
  *BBI = NewIter;
}

/// Does this instruction write some memory?  This only returns true for things
/// that we can analyze with other helpers below.
static bool hasAnalyzableMemoryWrite(Instruction *I,
                                     const TargetLibraryInfo &TLI) {
  if (isa<StoreInst>(I))
    return true;
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memset_element_unordered_atomic:
    case Intrinsic::init_trampoline:
    case Intrinsic::lifetime_end:
      return true;
    }
  }
  if (auto CS = CallSite(I)) {
    if (Function *F = CS.getCalledFunction()) {
      StringRef FnName = F->getName();
      if (TLI.has(LibFunc_strcpy) && FnName == TLI.getName(LibFunc_strcpy))
        return true;
      if (TLI.has(LibFunc_strncpy) && FnName == TLI.getName(LibFunc_strncpy))
        return true;
      if (TLI.has(LibFunc_strcat) && FnName == TLI.getName(LibFunc_strcat))
        return true;
      if (TLI.has(LibFunc_strncat) && FnName == TLI.getName(LibFunc_strncat))
        return true;
    }
  }
  return false;
}
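
// Example (illustrative, not part of the original file; %p, %dst, and %src
// are hypothetical values): each of the following counts as an analyzable
// write, while a call with unknown side effects does not:
//
//   store i32 0, i32* %p
//   call void @llvm.memset.p0i8.i64(i8* %dst, i8 0, i64 16, i1 false)
//   call i8* @strcpy(i8* %dst, i8* %src)   ; when strcpy is a known libcall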

/// Return a Location stored to by the specified instruction. If isRemovable
/// returns true, this function and getLocForRead completely describe the memory
/// operations for this instruction.
static MemoryLocation getLocForWrite(Instruction *Inst) {

  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return MemoryLocation::get(SI);

  if (auto *MI = dyn_cast<AnyMemIntrinsic>(Inst)) {
    // memcpy/memmove/memset.
    MemoryLocation Loc = MemoryLocation::getForDest(MI);
    return Loc;
  }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    default:
      return MemoryLocation(); // Unhandled intrinsic.
    case Intrinsic::init_trampoline:
      return MemoryLocation(II->getArgOperand(0));
    case Intrinsic::lifetime_end: {
      uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
      return MemoryLocation(II->getArgOperand(1), Len);
    }
    }
  }
  if (auto CS = CallSite(Inst))
    // All the supported TLI functions so far happen to have dest as their
    // first argument.
    return MemoryLocation(CS.getArgument(0));
  return MemoryLocation();
}
/// Return the location read by the specified "hasAnalyzableMemoryWrite"
/// instruction if any.
static MemoryLocation getLocForRead(Instruction *Inst,
                                    const TargetLibraryInfo &TLI) {
  assert(hasAnalyzableMemoryWrite(Inst, TLI) && "Unknown instruction case");

  // The only instructions that both read and write are the mem transfer
  // instructions (memcpy/memmove).
  if (auto *MTI = dyn_cast<AnyMemTransferInst>(Inst))
    return MemoryLocation::getForSource(MTI);
  return MemoryLocation();
}

/// If the value of this instruction and the memory it writes to is unused, may
/// we delete this instruction?
static bool isRemovable(Instruction *I) {
  // Don't remove volatile/atomic stores.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isUnordered();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("doesn't pass 'hasAnalyzableMemoryWrite' predicate");
    case Intrinsic::lifetime_end:
      // Never remove dead lifetime_end's, e.g. because it is followed by a
      // free.
      return false;
    case Intrinsic::init_trampoline:
      // Always safe to remove init_trampoline.
      return true;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      // Don't remove volatile memory intrinsics.
      return !cast<MemIntrinsic>(II)->isVolatile();
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memset_element_unordered_atomic:
      return true;
    }
  }

  // note: only get here for calls with analyzable writes - i.e. libcalls
  if (auto CS = CallSite(I))
    return CS.getInstruction()->use_empty();

  return false;
}

/// Returns true if the end of this instruction can be safely shortened in
/// length.
static bool isShortenableAtTheEnd(Instruction *I) {
  // Don't shorten stores for now
  if (isa<StoreInst>(I))
    return false;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
      default: return false;
      case Intrinsic::memset:
      case Intrinsic::memcpy:
      case Intrinsic::memcpy_element_unordered_atomic:
      case Intrinsic::memset_element_unordered_atomic:
        // Do shorten memory intrinsics.
        // FIXME: Add memmove if it's also safe to transform.
        return true;
    }
  }

  // Don't shorten libcalls calls for now.

  return false;
}

/// Returns true if the beginning of this instruction can be safely shortened
/// in length.
static bool isShortenableAtTheBeginning(Instruction *I) {
  // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
  // easily done by offsetting the source address.
  return isa<AnyMemSetInst>(I);
}

/// Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
  //TODO: factor this to reuse getLocForWrite
  MemoryLocation Loc = getLocForWrite(I);
  assert(Loc.Ptr &&
         "unable to find pointer written for analyzable instruction?");
  // TODO: most APIs don't expect const Value *
  return const_cast<Value*>(Loc.Ptr);
}

static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
                               const TargetLibraryInfo &TLI,
                               const Function *F) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.NullIsUnknownSize = NullPointerIsDefined(F);

  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

namespace {

enum OverwriteResult {
  OW_Begin,
  OW_Complete,
  OW_End,
  OW_PartialEarlierWithFullLater,
  OW_Unknown
};

} // end anonymous namespace

/// Return 'OW_Complete' if a store to the 'Later' location completely
/// overwrites a store to the 'Earlier' location, 'OW_End' if the end of the
/// 'Earlier' location is completely overwritten by 'Later', 'OW_Begin' if the
/// beginning of the 'Earlier' location is overwritten by 'Later'.
/// 'OW_PartialEarlierWithFullLater' means that an earlier (big) store was
/// overwritten by a later (smaller) store which doesn't write outside the big
/// store's memory locations. Returns 'OW_Unknown' if nothing can be determined.
static OverwriteResult isOverwrite(const MemoryLocation &Later,
                                   const MemoryLocation &Earlier,
                                   const DataLayout &DL,
                                   const TargetLibraryInfo &TLI,
                                   int64_t &EarlierOff, int64_t &LaterOff,
                                   Instruction *DepWrite,
                                   InstOverlapIntervalsTy &IOL,
                                   AliasAnalysis &AA,
                                   const Function *F) {
  // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll
  // get imprecise values here, though (except for unknown sizes).
  if (!Later.Size.isPrecise() || !Earlier.Size.isPrecise())
    return OW_Unknown;

  const uint64_t LaterSize = Later.Size.getValue();
  const uint64_t EarlierSize = Earlier.Size.getValue();

  const Value *P1 = Earlier.Ptr->stripPointerCasts();
  const Value *P2 = Later.Ptr->stripPointerCasts();

  // If the start pointers are the same, we just have to compare sizes to see if
  // the later store was larger than the earlier store.
  if (P1 == P2 || AA.isMustAlias(P1, P2)) {
    // Make sure that the Later size is >= the Earlier size.
    if (LaterSize >= EarlierSize)
      return OW_Complete;
  }

  // Check to see if the later store is to the entire object (either a global,
  // an alloca, or a byval/inalloca argument).  If so, then it clearly
  // overwrites any other store to the same object.
  const Value *UO1 = GetUnderlyingObject(P1, DL),
              *UO2 = GetUnderlyingObject(P2, DL);

  // If we can't resolve the same pointers to the same object, then we can't
  // analyze them at all.
  if (UO1 != UO2)
    return OW_Unknown;

  // If the "Later" store is to a recognizable object, get its size.
  uint64_t ObjectSize = getPointerSize(UO2, DL, TLI, F);
  if (ObjectSize != MemoryLocation::UnknownSize)
    if (ObjectSize == LaterSize && ObjectSize >= EarlierSize)
      return OW_Complete;

  // Okay, we have stores to two completely different pointers.  Try to
  // decompose the pointer into a "base + constant_offset" form.  If the base
  // pointers are equal, then we can reason about the two stores.
  EarlierOff = 0;
  LaterOff = 0;
  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);

  // If the base pointers still differ, we have two completely different stores.
  if (BP1 != BP2)
    return OW_Unknown;

  // The later store completely overlaps the earlier store if:
  //
  // 1. Both start at the same offset and the later one's size is greater than
  //    or equal to the earlier one's, or
  //
  //      |--earlier--|
  //      |--   later   --|
  //
  // 2. The earlier store has an offset greater than the later offset, but which
  //    still lies completely within the later store.
  //
  //        |--earlier--|
  //    |-----  later  ------|
  //
  // We have to be careful here as *Off is signed while *.Size is unsigned.
  if (EarlierOff >= LaterOff &&
      LaterSize >= EarlierSize &&
      uint64_t(EarlierOff - LaterOff) + EarlierSize <= LaterSize)
    return OW_Complete;

  // We may now overlap, although the overlap is not complete. There might also
  // be other incomplete overlaps, and together, they might cover the complete
  // earlier write.
  // Note: The correctness of this logic depends on the fact that this function
  // is not even called providing DepWrite when there are any intervening reads.
  if (EnablePartialOverwriteTracking &&
      LaterOff < int64_t(EarlierOff + EarlierSize) &&
      int64_t(LaterOff + LaterSize) >= EarlierOff) {

    // Insert our part of the overlap into the map.
    auto &IM = IOL[DepWrite];
    LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff
                      << ", " << int64_t(EarlierOff + EarlierSize)
                      << ") Later [" << LaterOff << ", "
                      << int64_t(LaterOff + LaterSize) << ")\n");

    // Make sure that we only insert non-overlapping intervals and combine
    // adjacent intervals. The intervals are stored in the map with the ending
    // offset as the key (in the half-open sense) and the starting offset as
    // the value.
    int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + LaterSize;

    // Find any intervals ending at, or after, LaterIntStart which start
    // before LaterIntEnd.
    auto ILI = IM.lower_bound(LaterIntStart);
    if (ILI != IM.end() && ILI->second <= LaterIntEnd) {
      // This existing interval is overlapped with the current store somewhere
      // in [LaterIntStart, LaterIntEnd]. Merge them by erasing the existing
      // intervals and adjusting our start and end.
      LaterIntStart = std::min(LaterIntStart, ILI->second);
      LaterIntEnd = std::max(LaterIntEnd, ILI->first);
      ILI = IM.erase(ILI);

      // Continue erasing and adjusting our end in case other previous
      // intervals are also overlapped with the current store.
      //
      // |--- earlier 1 ---|  |--- earlier 2 ---|
      //     |------- later---------|
      //
      while (ILI != IM.end() && ILI->second <= LaterIntEnd) {
        assert(ILI->second > LaterIntStart && "Unexpected interval");
        LaterIntEnd = std::max(LaterIntEnd, ILI->first);
        ILI = IM.erase(ILI);
      }
    }

    IM[LaterIntEnd] = LaterIntStart;

    ILI = IM.begin();
    if (ILI->second <= EarlierOff &&
        ILI->first >= int64_t(EarlierOff + EarlierSize)) {
      LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier ["
                        << EarlierOff << ", "
                        << int64_t(EarlierOff + EarlierSize)
                        << ") Composite Later [" << ILI->second << ", "
                        << ILI->first << ")\n");
      ++NumCompletePartials;
      return OW_Complete;
    }
  }

  // Check for an earlier store which writes to all the memory locations that
  // the later store writes to.
  if (EnablePartialStoreMerging && LaterOff >= EarlierOff &&
      int64_t(EarlierOff + EarlierSize) > LaterOff &&
      uint64_t(LaterOff - EarlierOff) + LaterSize <= EarlierSize) {
    LLVM_DEBUG(dbgs() << "DSE: Partial overwrite an earlier load ["
                      << EarlierOff << ", "
                      << int64_t(EarlierOff + EarlierSize)
                      << ") by a later store [" << LaterOff << ", "
                      << int64_t(LaterOff + LaterSize) << ")\n");
    // TODO: Maybe come up with a better name?
    return OW_PartialEarlierWithFullLater;
  }

  // Another interesting case is if the later store overwrites the end of the
  // earlier store.
  //
  //      |--earlier--|
  //                |--   later   --|
  //
  // In this case we may want to trim the size of earlier to avoid generating
  // writes to addresses which will definitely be overwritten later
  if (!EnablePartialOverwriteTracking &&
      (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + EarlierSize) &&
       int64_t(LaterOff + LaterSize) >= int64_t(EarlierOff + EarlierSize)))
    return OW_End;

  // Finally, we also need to check if the later store overwrites the beginning
  // of the earlier store.
  //
  //                |--earlier--|
  //      |--   later   --|
  //
  // In this case we may want to move the destination address and trim the size
  // of earlier to avoid generating writes to addresses which will definitely
  // be overwritten later.
  if (!EnablePartialOverwriteTracking &&
      (LaterOff <= EarlierOff && int64_t(LaterOff + LaterSize) > EarlierOff)) {
    assert(int64_t(LaterOff + LaterSize) < int64_t(EarlierOff + EarlierSize) &&
           "Expect to be handled as OW_Complete");
    return OW_Begin;
  }
  // Otherwise, they don't completely overlap.
  return OW_Unknown;
}
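
// Worked example (illustrative): suppose an earlier 8-byte store covers
// [0, 8) and is later overwritten by two 4-byte stores covering [0, 4) and
// [4, 8).  After the second partial store is recorded, the interval map for
// the earlier store holds the single merged interval [0, 8), which covers
// the entire earlier write, so isOverwrite returns OW_Complete and the
// earlier store can be deleted even though neither later store alone
// overwrites it.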

/// If 'Inst' might be a self read (i.e. a noop copy of a
/// memory region into an identical pointer) then it doesn't actually make its
/// input dead in the traditional sense.  Consider this case:
///
///   memmove(A <- B)
///   memmove(A <- A)
///
/// In this case, the second store to A does not make the first store to A dead.
/// The usual situation isn't an explicit A<-A store like this (which can be
/// trivially removed) but a case where two pointers may alias.
///
/// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE inducing instruction may be a self-read.
static bool isPossibleSelfRead(Instruction *Inst,
                               const MemoryLocation &InstStoreLoc,
                               Instruction *DepWrite,
                               const TargetLibraryInfo &TLI,
                               AliasAnalysis &AA) {
  // Self reads can only happen for instructions that read memory.  Get the
  // location read.
  MemoryLocation InstReadLoc = getLocForRead(Inst, TLI);
  if (!InstReadLoc.Ptr)
    return false; // Not a reading instruction.

  // If the read and written loc obviously don't alias, it isn't a read.
  if (AA.isNoAlias(InstReadLoc, InstStoreLoc))
    return false;

  if (isa<AnyMemCpyInst>(Inst)) {
    // LLVM's memcpy overlap semantics are not fully fleshed out (see PR11763)
    // but in practice memcpy(A <- B) either means that A and B are disjoint or
    // are equal (i.e. there are not partial overlaps).  Given that, if we have:
    //
    //   memcpy/memmove(A <- B)  // DepWrite
    //   memcpy(A <- B)          // Inst
    //
    // with Inst reading/writing a size >= DepWrite's, we can reason as
    // follows:
    //
    //   - If A == B then both the copies are no-ops, so the DepWrite can be
    //     removed.
    //   - If A != B then A and B are disjoint locations in Inst.  Since
    //     Inst.size >= DepWrite.size A and B are disjoint in DepWrite too.
    //     Therefore DepWrite can be removed.
    MemoryLocation DepReadLoc = getLocForRead(DepWrite, TLI);

    if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
      return false;
  }

  // If DepWrite doesn't read memory or if we can't prove it is a must alias,
  // then it can't be considered dead.
  return true;
}

/// Returns true if the memory which is accessed by the second instruction is not
/// modified between the first and the second instruction.
/// Precondition: Second instruction must be dominated by the first
/// instruction.
static bool memoryIsNotModifiedBetween(Instruction *FirstI,
                                       Instruction *SecondI,
                                       AliasAnalysis *AA) {
  SmallVector<BasicBlock *, 16> WorkList;
  SmallPtrSet<BasicBlock *, 8> Visited;
  BasicBlock::iterator FirstBBI(FirstI);
  ++FirstBBI;
  BasicBlock::iterator SecondBBI(SecondI);
  BasicBlock *FirstBB = FirstI->getParent();
  BasicBlock *SecondBB = SecondI->getParent();
  MemoryLocation MemLoc = MemoryLocation::get(SecondI);

  // Start checking the store-block.
  WorkList.push_back(SecondBB);
  bool isFirstBlock = true;

  // Check all blocks going backward until we reach the load-block.
  while (!WorkList.empty()) {
    BasicBlock *B = WorkList.pop_back_val();

    // Ignore instructions before LI if this is the FirstBB.
    BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());

    BasicBlock::iterator EI;
    if (isFirstBlock) {
      // Ignore instructions after SI if this is the first visit of SecondBB.
      assert(B == SecondBB && "first block is not the store block");
      EI = SecondBBI;
      isFirstBlock = false;
    } else {
      // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
      // In this case we also have to look at instructions after SI.
      EI = B->end();
    }
    for (; BI != EI; ++BI) {
      Instruction *I = &*BI;
      if (I->mayWriteToMemory() && I != SecondI)
        if (isModSet(AA->getModRefInfo(I, MemLoc)))
          return false;
    }
    if (B != FirstBB) {
      assert(B != &FirstBB->getParent()->getEntryBlock() &&
          "Should not hit the entry block because SI must be dominated by LI");
      for (auto PredI = pred_begin(B), PE = pred_end(B); PredI != PE; ++PredI) {
        if (!Visited.insert(*PredI).second)
          continue;
        WorkList.push_back(*PredI);
      }
    }
  }
  return true;
}

/// Find all blocks that will unconditionally lead to the block BB and append
/// them to Blocks.
static void findUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
                                   BasicBlock *BB, DominatorTree *DT) {
  for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
    BasicBlock *Pred = *I;
    if (Pred == BB) continue;
    Instruction *PredTI = Pred->getTerminator();
    if (PredTI->getNumSuccessors() != 1)
      continue;

    if (DT->isReachableFromEntry(Pred))
      Blocks.push_back(Pred);
  }
}

/// Handle frees of entire structures whose dependency is a store
/// to a field of that structure.
static bool handleFree(CallInst *F, AliasAnalysis *AA,
                       MemoryDependenceResults *MD, DominatorTree *DT,
                       const TargetLibraryInfo *TLI,
                       InstOverlapIntervalsTy &IOL,
                       DenseMap<Instruction*, size_t> *InstrOrdering) {
  bool MadeChange = false;

  MemoryLocation Loc = MemoryLocation(F->getOperand(0));
  SmallVector<BasicBlock *, 16> Blocks;
  Blocks.push_back(F->getParent());
  const DataLayout &DL = F->getModule()->getDataLayout();

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.pop_back_val();
    Instruction *InstPt = BB->getTerminator();
    if (BB == F->getParent()) InstPt = F;

    MemDepResult Dep =
        MD->getPointerDependencyFrom(Loc, false, InstPt->getIterator(), BB);
    while (Dep.isDef() || Dep.isClobber()) {
      Instruction *Dependency = Dep.getInst();
      if (!hasAnalyzableMemoryWrite(Dependency, *TLI) ||
          !isRemovable(Dependency))
        break;

      Value *DepPointer =
          GetUnderlyingObject(getStoredPointerOperand(Dependency), DL);

      // Check for aliasing.
      if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
        break;

      LLVM_DEBUG(
          dbgs() << "DSE: Dead Store to soon to be freed memory:\n  DEAD: "
                 << *Dependency << '\n');

      // DCE instructions only used to calculate that store.
      BasicBlock::iterator BBI(Dependency);
      deleteDeadInstruction(Dependency, &BBI, *MD, *TLI, IOL, InstrOrdering);
      ++NumFastStores;
      MadeChange = true;

      // Inst's old Dependency is now deleted. Compute the next dependency,
      // which may also be dead, as in
      //    s[0] = 0;
      //    s[1] = 0; // This has just been deleted.
      //    free(s);
      Dep = MD->getPointerDependencyFrom(Loc, false, BBI, BB);
    }

    if (Dep.isNonLocal())
      findUnconditionalPreds(Blocks, BB, DT);
  }

  return MadeChange;
}
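
// Example (illustrative, not part of the original file): the store below is
// dead because the object is freed immediately afterwards with no
// intervening read:
//
//   %p = call i8* @malloc(i64 4)
//   store i8 0, i8* %p          ; dead: %p is freed before any read
//   call void @free(i8* %p)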

/// Check to see if the specified location may alias any of the stack objects in
/// the DeadStackObjects set. If so, they become live because the location is
/// being loaded.
static void removeAccessedObjects(const MemoryLocation &LoadedLoc,
                                  SmallSetVector<Value *, 16> &DeadStackObjects,
                                  const DataLayout &DL, AliasAnalysis *AA,
                                  const TargetLibraryInfo *TLI,
                                  const Function *F) {
  const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);

  // A constant can't be in the dead pointer set.
  if (isa<Constant>(UnderlyingPointer))
    return;

  // If the kill pointer can be easily reduced to an alloca, don't bother doing
  // extraneous AA queries.
  if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
    DeadStackObjects.remove(const_cast<Value*>(UnderlyingPointer));
    return;
  }

  // Remove objects that could alias LoadedLoc.
  DeadStackObjects.remove_if([&](Value *I) {
    // See if the loaded location could alias the stack location.
    MemoryLocation StackLoc(I, getPointerSize(I, DL, *TLI, F));
    return !AA->isNoAlias(StackLoc, LoadedLoc);
  });
}

/// Remove dead stores to stack-allocated locations in the function end block.
/// Ex:
///   %A = alloca i32
///   ...
///   store i32 1, i32* %A
///   ret void
static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
                           MemoryDependenceResults *MD,
                           const TargetLibraryInfo *TLI,
                           InstOverlapIntervalsTy &IOL,
                           DenseMap<Instruction*, size_t> *InstrOrdering) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallSetVector<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock &Entry = BB.getParent()->front();
  for (Instruction &I : Entry) {
    if (isa<AllocaInst>(&I))
      DeadStackObjects.insert(&I);

    // Okay, so these are dead heap objects, but if the pointer never escapes
    // then it's leaked by this function anyways.
    else if (isAllocLikeFn(&I, TLI) && !PointerMayBeCaptured(&I, true, true))
      DeadStackObjects.insert(&I);
  }

  // Treat byval or inalloca arguments the same, stores to them are dead at the
  // end of the function.
  for (Argument &AI : BB.getParent()->args())
    if (AI.hasByValOrInAllocaAttr())
      DeadStackObjects.insert(&AI);

  const DataLayout &DL = BB.getModule()->getDataLayout();

  // Scan the basic block backwards
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ) {
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasAnalyzableMemoryWrite(&*BBI, *TLI) && isRemovable(&*BBI)) {
      // See through pointer-to-pointer bitcasts
      SmallVector<Value *, 4> Pointers;
      GetUnderlyingObjects(getStoredPointerOperand(&*BBI), Pointers, DL);

      // Stores to stack values are valid candidates for removal.
      bool AllDead = true;
      for (Value *Pointer : Pointers)
        if (!DeadStackObjects.count(Pointer)) {
          AllDead = false;
          break;
        }

      if (AllDead) {
        Instruction *Dead = &*BBI;

        LLVM_DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                          << *Dead << "\n  Objects: ";
                   for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
                        E = Pointers.end();
                        I != E; ++I) {
                     dbgs() << **I;
                     if (std::next(I) != E)
                       dbgs() << ", ";
                   } dbgs()
                   << '\n');

        // DCE instructions only used to calculate that store.
        deleteDeadInstruction(Dead, &BBI, *MD, *TLI, IOL, InstrOrdering,
                              &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }

    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(&*BBI, TLI)) {
      LLVM_DEBUG(dbgs() << "DSE: Removing trivially dead instruction:\n  DEAD: "
                        << *&*BBI << '\n');
      deleteDeadInstruction(&*BBI, &BBI, *MD, *TLI, IOL, InstrOrdering,
                            &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    if (isa<AllocaInst>(BBI)) {
      // Remove allocas from the list of dead stack objects; there can't be
      // any references before the definition.
      DeadStackObjects.remove(&*BBI);
      continue;
    }

    if (auto *Call = dyn_cast<CallBase>(&*BBI)) {
      // Remove allocation function calls from the list of dead stack objects;
      // there can't be any references before the definition.
      if (isAllocLikeFn(&*BBI, TLI))
        DeadStackObjects.remove(&*BBI);

      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(Call))
        continue;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      DeadStackObjects.remove_if([&](Value *I) {
        // See if the call site touches the value.
        return isRefSet(AA->getModRefInfo(
            Call, I, getPointerSize(I, DL, *TLI, BB.getParent())));
      });

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
        break;

      continue;
    }

    // We can remove the dead stores, irrespective of the fence and its ordering
    // (release/acquire/seq_cst). Fences only constrain the ordering of
    // already visible stores; they do not make a store visible to other
    // threads. So, skipping over a fence does not change a store from being
    // dead.
    if (isa<FenceInst>(*BBI))
      continue;

    MemoryLocation LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      if (!L->isUnordered()) // Be conservative with atomic/volatile load
        break;
      LoadedLoc = MemoryLocation::get(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = MemoryLocation::get(V);
    } else if (!BBI->mayReadFromMemory()) {
      // Instruction doesn't read memory.  Note that stores that weren't removed
      // above will hit this case.
      continue;
    } else {
      // Unknown inst; assume it clobbers everything.
      break;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    removeAccessedObjects(LoadedLoc, DeadStackObjects, DL, AA, TLI, BB.getParent());

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}

static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierOffset,
                         int64_t &EarlierSize, int64_t LaterOffset,
                         int64_t LaterSize, bool IsOverwriteEnd) {
  // TODO: base this on the target vector size so that if the earlier
  // store was too small to get vector writes anyway then it's likely
  // a good idea to shorten it.
  // Power-of-2 vector writes are probably always a bad idea to optimize,
  // as any store/memset/memcpy is likely using vector instructions, so
  // shortening it to a non-vector size is likely to be slower.
  auto *EarlierIntrinsic = cast<AnyMemIntrinsic>(EarlierWrite);
  unsigned EarlierWriteAlign = EarlierIntrinsic->getDestAlignment();
  if (!IsOverwriteEnd)
    LaterOffset = int64_t(LaterOffset + LaterSize);

  if (!(isPowerOf2_64(LaterOffset) && EarlierWriteAlign <= LaterOffset) &&
      !((EarlierWriteAlign != 0) && LaterOffset % EarlierWriteAlign == 0))
    return false;

  int64_t NewLength = IsOverwriteEnd
                          ? LaterOffset - EarlierOffset
                          : EarlierSize - (LaterOffset - EarlierOffset);

  if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(EarlierWrite)) {
    // When shortening an atomic memory intrinsic, the newly shortened
    // length must remain an integer multiple of the element size.
    const uint32_t ElementSize = AMI->getElementSizeInBytes();
    if (0 != NewLength % ElementSize)
      return false;
  }

  LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW "
                    << (IsOverwriteEnd ? "END" : "BEGIN") << ": "
                    << *EarlierWrite << "\n  KILLER (offset " << LaterOffset
                    << ", " << EarlierSize << ")\n");

  Value *EarlierWriteLength = EarlierIntrinsic->getLength();
  Value *TrimmedLength =
      ConstantInt::get(EarlierWriteLength->getType(), NewLength);
  EarlierIntrinsic->setLength(TrimmedLength);

  EarlierSize = NewLength;
  if (!IsOverwriteEnd) {
    int64_t OffsetMoved = (LaterOffset - EarlierOffset);
    Value *Indices[1] = {
        ConstantInt::get(EarlierWriteLength->getType(), OffsetMoved)};
    GetElementPtrInst *NewDestGEP = GetElementPtrInst::CreateInBounds(
        EarlierIntrinsic->getRawDest(), Indices, "", EarlierWrite);
    EarlierIntrinsic->setDest(NewDestGEP);
    EarlierOffset = EarlierOffset + OffsetMoved;
  }
  return true;
}
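
// Worked example (illustrative, not part of the original file): given
//
//   memset(p, 0, 32)        ; earlier write, covers [0, 32)
//   memset(p + 16, 1, 16)   ; later write, covers [16, 32)
//
// the tail of the earlier memset is dead, so tryToShorten rewrites it to
// memset(p, 0, 16) (the OW_End case), provided the length/alignment checks
// above succeed.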

static bool tryToShortenEnd(Instruction *EarlierWrite,
                            OverlapIntervalsTy &IntervalMap,
                            int64_t &EarlierStart, int64_t &EarlierSize) {
  if (IntervalMap.empty() || !isShortenableAtTheEnd(EarlierWrite))
    return false;

  OverlapIntervalsTy::iterator OII = --IntervalMap.end();
  int64_t LaterStart = OII->second;
  int64_t LaterSize = OII->first - LaterStart;

  if (LaterStart > EarlierStart && LaterStart < EarlierStart + EarlierSize &&
      LaterStart + LaterSize >= EarlierStart + EarlierSize) {
    if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
                     LaterSize, true)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}

static bool tryToShortenBegin(Instruction *EarlierWrite,
                              OverlapIntervalsTy &IntervalMap,
                              int64_t &EarlierStart, int64_t &EarlierSize) {
  if (IntervalMap.empty() || !isShortenableAtTheBeginning(EarlierWrite))
    return false;

  OverlapIntervalsTy::iterator OII = IntervalMap.begin();
  int64_t LaterStart = OII->second;
  int64_t LaterSize = OII->first - LaterStart;

  if (LaterStart <= EarlierStart && LaterStart + LaterSize > EarlierStart) {
    assert(LaterStart + LaterSize < EarlierStart + EarlierSize &&
           "Should have been handled as OW_Complete");
    if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
                     LaterSize, false)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}

static bool removePartiallyOverlappedStores(AliasAnalysis *AA,
                                            const DataLayout &DL,
                                            InstOverlapIntervalsTy &IOL) {
  bool Changed = false;
  for (auto OI : IOL) {
    Instruction *EarlierWrite = OI.first;
    MemoryLocation Loc = getLocForWrite(EarlierWrite);
    assert(isRemovable(EarlierWrite) && "Expect only removable instruction");

    const Value *Ptr = Loc.Ptr->stripPointerCasts();
    int64_t EarlierStart = 0;
    int64_t EarlierSize = int64_t(Loc.Size.getValue());
    GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL);
    OverlapIntervalsTy &IntervalMap = OI.second;
    Changed |=
        tryToShortenEnd(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
    if (IntervalMap.empty())
      continue;
    Changed |=
        tryToShortenBegin(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
  }
  return Changed;
}

static bool eliminateNoopStore(Instruction *Inst, BasicBlock::iterator &BBI,
                               AliasAnalysis *AA, MemoryDependenceResults *MD,
                               const DataLayout &DL,
                               const TargetLibraryInfo *TLI,
                               InstOverlapIntervalsTy &IOL,
                               DenseMap<Instruction*, size_t> *InstrOrdering) {
  // Must be a store instruction.
  StoreInst *SI = dyn_cast<StoreInst>(Inst);
  if (!SI)
    return false;

  // If we're storing the same value back to a pointer that we just loaded from,
  // then the store can be removed.
  if (LoadInst *DepLoad = dyn_cast<LoadInst>(SI->getValueOperand())) {
    if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
        isRemovable(SI) && memoryIsNotModifiedBetween(DepLoad, SI, AA)) {

      LLVM_DEBUG(
          dbgs() << "DSE: Remove Store Of Load from same pointer:\n  LOAD: "
                 << *DepLoad << "\n  STORE: " << *SI << '\n');

      deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, InstrOrdering);
      ++NumRedundantStores;
      return true;
    }
  }

  // Remove null stores into the calloc'ed objects
  Constant *StoredConstant = dyn_cast<Constant>(SI->getValueOperand());
  if (StoredConstant && StoredConstant->isNullValue() && isRemovable(SI)) {
    Instruction *UnderlyingPointer =
        dyn_cast<Instruction>(GetUnderlyingObject(SI->getPointerOperand(), DL));

    if (UnderlyingPointer && isCallocLikeFn(UnderlyingPointer, TLI) &&
        memoryIsNotModifiedBetween(UnderlyingPointer, SI, AA)) {
      LLVM_DEBUG(
          dbgs() << "DSE: Remove null store to the calloc'ed object:\n  DEAD: "
                 << *Inst << "\n  OBJECT: " << *UnderlyingPointer << '\n');

      deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, InstrOrdering);
      ++NumRedundantStores;
      return true;
    }
  }
  return false;
}
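
// Example (illustrative, not part of the original file): the store below is
// a noop because it writes back the value just loaded from the same pointer,
// and the memory is not modified in between:
//
//   %v = load i32, i32* %p
//   store i32 %v, i32* %p   ; removed by eliminateNoopStore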

static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA,
                                MemoryDependenceResults *MD, DominatorTree *DT,
                                const TargetLibraryInfo *TLI) {
  const DataLayout &DL = BB.getModule()->getDataLayout();
  bool MadeChange = false;

  // FIXME: Maybe change this to use some abstraction like OrderedBasicBlock?
  // The current OrderedBasicBlock can't deal with mutation at the moment.
  size_t LastThrowingInstIndex = 0;
  DenseMap<Instruction*, size_t> InstrOrdering;
  size_t InstrIndex = 1;

  // A map of interval maps representing partially-overwritten value parts.
  InstOverlapIntervalsTy IOL;

  // Do a top-down walk on the BB.
  for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
    // Handle 'free' calls specially.
    if (CallInst *F = isFreeCall(&*BBI, TLI)) {
      MadeChange |= handleFree(F, AA, MD, DT, TLI, IOL, &InstrOrdering);
      // Increment BBI after handleFree has potentially deleted instructions.
      // This ensures we maintain a valid iterator.
      ++BBI;
      continue;
    }

    Instruction *Inst = &*BBI++;

    size_t CurInstNumber = InstrIndex++;
    InstrOrdering.insert(std::make_pair(Inst, CurInstNumber));
    if (Inst->mayThrow()) {
      LastThrowingInstIndex = CurInstNumber;
      continue;
    }

    // Check to see if Inst writes to memory.  If not, continue.
    if (!hasAnalyzableMemoryWrite(Inst, *TLI))
      continue;

    // eliminateNoopStore will update the iterator, if necessary.
    if (eliminateNoopStore(Inst, BBI, AA, MD, DL, TLI, IOL, &InstrOrdering)) {
      MadeChange = true;
      continue;
    }

    // If we find something that writes memory, get its memory dependence.
    MemDepResult InstDep = MD->getDependency(Inst);

    // Ignore any store where we can't find a local dependence.
    // FIXME: cross-block DSE would be fun. :)
    if (!InstDep.isDef() && !InstDep.isClobber())
      continue;

    // Figure out what location is being stored to.
    MemoryLocation Loc = getLocForWrite(Inst);

    // If we didn't get a useful location, fail.
    if (!Loc.Ptr)
      continue;

    // Loop until we find a store we can eliminate or a load that
    // invalidates the analysis. Without an upper bound on the number of
    // instructions examined, this analysis can become very time-consuming.
    // However, the potential gain diminishes as we process more instructions
    // without eliminating any of them. Therefore, we limit the number of
    // instructions we look at.
    auto Limit = MD->getDefaultBlockScanLimit();
    while (InstDep.isDef() || InstDep.isClobber()) {
      // Get the memory clobbered by the instruction we depend on.  MemDep will
      // skip any instructions that 'Loc' clearly doesn't interact with.  If we
      // end up depending on a may- or must-aliased load, then we can't optimize
      // away the store and we bail out.  However, if we depend on something
      // that overwrites the memory location we *can* potentially optimize it.
      //
      // Find out what memory location the dependent instruction stores.
      Instruction *DepWrite = InstDep.getInst();
      if (!hasAnalyzableMemoryWrite(DepWrite, *TLI))
        break;
      MemoryLocation DepLoc = getLocForWrite(DepWrite);
      // If we didn't get a useful location, or if it isn't a size, bail out.
      if (!DepLoc.Ptr)
        break;

      // Make sure we don't look past a call which might throw. This is an
      // issue because MemoryDependenceAnalysis works in the wrong direction:
      // it finds instructions which dominate the current instruction, rather than
      // instructions which are post-dominated by the current instruction.
      //
      // If the underlying object is a non-escaping memory allocation, any store
      // to it is dead along the unwind edge. Otherwise, we need to preserve
      // the store.
      size_t DepIndex = InstrOrdering.lookup(DepWrite);
      assert(DepIndex && "Unexpected instruction");
      if (DepIndex <= LastThrowingInstIndex) {
        const Value* Underlying = GetUnderlyingObject(DepLoc.Ptr, DL);
        bool IsStoreDeadOnUnwind = isa<AllocaInst>(Underlying);
        if (!IsStoreDeadOnUnwind) {
          // We're looking for a call to an allocation function
          // where the allocation doesn't escape before the last
          // throwing instruction; PointerMayBeCaptured
          // provides a reasonably fast approximation.
          IsStoreDeadOnUnwind = isAllocLikeFn(Underlying, TLI) &&
              !PointerMayBeCaptured(Underlying, false, true);
        }
        if (!IsStoreDeadOnUnwind)
          break;
      }

      // If we find a write that is a) removable (i.e., non-volatile), b) is
      // completely obliterated by the store to 'Loc', and c) which we know that
      // 'Inst' doesn't load from, then we can remove it.
      // Also try to merge two stores if a later one only touches memory written
      // to by the earlier one.
      if (isRemovable(DepWrite) &&
          !isPossibleSelfRead(Inst, Loc, DepWrite, *TLI, *AA)) {
        int64_t InstWriteOffset, DepWriteOffset;
        OverwriteResult OR = isOverwrite(Loc, DepLoc, DL, *TLI, DepWriteOffset,
                                         InstWriteOffset, DepWrite, IOL, *AA,
                                         BB.getParent());
        if (OR == OW_Complete) {
          LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: " << *DepWrite
                            << "\n  KILLER: " << *Inst << '\n');

          // Delete the store and now-dead instructions that feed it.
          deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL, &InstrOrdering);
          ++NumFastStores;
          MadeChange = true;

          // We erased DepWrite; start over.
          InstDep = MD->getDependency(Inst);
          continue;
        } else if ((OR == OW_End && isShortenableAtTheEnd(DepWrite)) ||
                   ((OR == OW_Begin &&
                     isShortenableAtTheBeginning(DepWrite)))) {
          assert(!EnablePartialOverwriteTracking && "Do not expect to perform "
                                                    "when partial-overwrite "
                                                    "tracking is enabled");
          // The overwrite result is known, so these must be known, too.
          int64_t EarlierSize = DepLoc.Size.getValue();
          int64_t LaterSize = Loc.Size.getValue();
          bool IsOverwriteEnd = (OR == OW_End);
          MadeChange |= tryToShorten(DepWrite, DepWriteOffset, EarlierSize,
                                     InstWriteOffset, LaterSize, IsOverwriteEnd);
        } else if (EnablePartialStoreMerging &&
                   OR == OW_PartialEarlierWithFullLater) {
          auto *Earlier = dyn_cast<StoreInst>(DepWrite);
          auto *Later = dyn_cast<StoreInst>(Inst);
          if (Earlier && isa<ConstantInt>(Earlier->getValueOperand()) &&
              Later && isa<ConstantInt>(Later->getValueOperand()) &&
              memoryIsNotModifiedBetween(Earlier, Later, AA)) {
            // If the store we find is:
            //   a) partially overwritten by the store to 'Loc',
            //   b) the later store is fully contained in the earlier one, and
            //   c) they both have a constant value,
            // merge the two stores, replacing the earlier store's value with a
            // merge of both values.
            // TODO: Deal with other constant types (vectors, etc), and probably
            // some mem intrinsics (if needed)

            APInt EarlierValue =
                cast<ConstantInt>(Earlier->getValueOperand())->getValue();
            APInt LaterValue =
                cast<ConstantInt>(Later->getValueOperand())->getValue();
            unsigned LaterBits = LaterValue.getBitWidth();
            assert(EarlierValue.getBitWidth() > LaterValue.getBitWidth());
            LaterValue = LaterValue.zext(EarlierValue.getBitWidth());

            // Offset of the smaller store inside the larger store
            unsigned BitOffsetDiff = (InstWriteOffset - DepWriteOffset) * 8;
            unsigned LShiftAmount =
                DL.isBigEndian()
                    ? EarlierValue.getBitWidth() - BitOffsetDiff - LaterBits
                    : BitOffsetDiff;
            APInt Mask =
                APInt::getBitsSet(EarlierValue.getBitWidth(), LShiftAmount,
                                  LShiftAmount + LaterBits);
            // Clear the bits we'll be replacing, then OR with the smaller
            // store, shifted appropriately.
            APInt Merged =
                (EarlierValue & ~Mask) | (LaterValue << LShiftAmount);
            LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n  Earlier: " << *DepWrite
                              << "\n  Later: " << *Inst
                              << "\n  Merged Value: " << Merged << '\n');

            auto *SI = new StoreInst(
                ConstantInt::get(Earlier->getValueOperand()->getType(), Merged),
                Earlier->getPointerOperand(), false, Earlier->getAlignment(),
                Earlier->getOrdering(), Earlier->getSyncScopeID(), DepWrite);

            unsigned MDToKeep[] = {LLVMContext::MD_dbg, LLVMContext::MD_tbaa,
                                   LLVMContext::MD_alias_scope,
                                   LLVMContext::MD_noalias,
                                   LLVMContext::MD_nontemporal};
            SI->copyMetadata(*DepWrite, MDToKeep);
            ++NumModifiedStores;

            // Remove earlier, wider, store
            size_t Idx = InstrOrdering.lookup(DepWrite);
            InstrOrdering.erase(DepWrite);
            InstrOrdering.insert(std::make_pair(SI, Idx));

            // Delete the old stores and now-dead instructions that feed them.
            deleteDeadInstruction(Inst, &BBI, *MD, *TLI, IOL, &InstrOrdering);
            deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL,
                                  &InstrOrdering);
            MadeChange = true;

            // We erased DepWrite and Inst (Loc); start over.
            break;
          }
        }
      }

      // If this is a may-aliased store that is clobbering the store value, we
      // can keep searching past it for another must-aliased pointer that stores
      // to the same location.  For example, in:
      //   store -> P
      //   store -> Q
      //   store -> P
      // we can remove the first store to P even though we don't know if P and Q
      // alias.
      if (DepWrite == &BB.front()) break;

      // Can't look past this instruction if it might read 'Loc'.
      if (isRefSet(AA->getModRefInfo(DepWrite, Loc)))
        break;

      InstDep = MD->getPointerDependencyFrom(Loc, /*isLoad=*/ false,
                                             DepWrite->getIterator(), &BB,
                                             /*QueryInst=*/ nullptr, &Limit);
    }
  }

  if (EnablePartialOverwriteTracking)
    MadeChange |= removePartiallyOverlappedStores(AA, DL, IOL);

  // If this block ends in a return, unwind, or unreachable, all allocas are
  // dead at its end, which means stores to them are also dead.
  if (BB.getTerminator()->getNumSuccessors() == 0)
    MadeChange |= handleEndBlock(BB, AA, MD, TLI, IOL, &InstrOrdering);

  return MadeChange;
}
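
// Worked example for the store-merging arithmetic above (illustrative,
// little-endian): an earlier i32 store of 0x11223344 at offset 0 that is
// partially overwritten by a later i8 store of 0xAA at byte offset 1 gives
// BitOffsetDiff = 8, LShiftAmount = 8, Mask = 0x0000FF00, and a merged
// value of (0x11223344 & ~0x0000FF00) | (0xAA << 8) = 0x1122AA44, which is
// written back as a single i32 store.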

static bool eliminateDeadStores(Function &F, AliasAnalysis *AA,
                                MemoryDependenceResults *MD, DominatorTree *DT,
                                const TargetLibraryInfo *TLI) {
  bool MadeChange = false;
  for (BasicBlock &BB : F)
    // Only check non-dead blocks.  Dead blocks may have strange pointer
    // cycles that will confuse alias analysis.
    if (DT->isReachableFromEntry(&BB))
      MadeChange |= eliminateDeadStores(BB, AA, MD, DT, TLI);

  return MadeChange;
}

//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//
PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
  AliasAnalysis *AA = &AM.getResult<AAManager>(F);
  DominatorTree *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  MemoryDependenceResults *MD = &AM.getResult<MemoryDependenceAnalysis>(F);
  const TargetLibraryInfo *TLI = &AM.getResult<TargetLibraryAnalysis>(F);

  if (!eliminateDeadStores(F, AA, MD, DT, TLI))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  PA.preserve<MemoryDependenceAnalysis>();
  return PA;
}

namespace {

/// A legacy pass for the legacy pass manager that wraps \c DSEPass.
class DSELegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  DSELegacyPass() : FunctionPass(ID) {
    initializeDSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    MemoryDependenceResults *MD =
        &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
    const TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

    return eliminateDeadStores(F, AA, MD, DT, TLI);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
  }
};

} // end anonymous namespace

char DSELegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false,
                    false)

FunctionPass *llvm::createDeadStoreEliminationPass() {
  return new DSELegacyPass();
}
Legacy wrapper pass to provide the GlobalsAAResult object.
bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL, const TargetLibraryInfo *TLI, ObjectSizeOpts Opts={})
Compute the size of the object pointed by Ptr.
Value * getValueOperand()
Definition: Instructions.h:410
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks &#39;this&#39; from the containing basic block and deletes it.
Definition: Instruction.cpp:68
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:111
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
Provides a lazy, caching interface for making common memory aliasing information queries, backed by LLVM&#39;s alias analysis passes.
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
This class represents an incoming formal argument to a Function.
Definition: Argument.h:30
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:770
This class represents lattice values for constants.
Definition: AllocatorList.h:24
This is the interface for a simple mod/ref and alias analysis over globals.
void initializeDSELegacyPassPass(PassRegistry &)
bool isNoAlias(const MemoryLocation &LocA, const MemoryLocation &LocB)
A trivial helper function to check to see if the specified pointers are no-alias. ...
bool isPrecise() const
APInt zext(unsigned width) const
Zero extend to a new width.
Definition: APInt.cpp:858
bool NullIsUnknownSize
If this is true, null pointers in address space 0 will be treated as though they can&#39;t be evaluated...
This class represents a function call, abstracting a target machine&#39;s calling convention.
bool isNonLocal() const
Tests if this MemDepResult represents a query that is transparent to the start of the block...
bool mayWriteToMemory() const
Return true if this instruction may modify memory.
StringRef getName(LibFunc F) const
bool salvageDebugInfo(Instruction &I)
Assuming the instruction I is going to be deleted, attempt to salvage debug users of I by writing the...
Definition: Local.cpp:1591
STATISTIC(NumFunctions, "Total number of functions")
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:231
F(f)
static bool handleFree(CallInst *F, AliasAnalysis *AA, MemoryDependenceResults *MD, DominatorTree *DT, const TargetLibraryInfo *TLI, InstOverlapIntervalsTy &IOL, DenseMap< Instruction *, size_t > *InstrOrdering)
Handle frees of entire structures whose dependency is a store to a field of that structure.
An instruction for reading from memory.
Definition: Instructions.h:168
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:138
static bool isPossibleSelfRead(Instruction *Inst, const MemoryLocation &InstStoreLoc, Instruction *DepWrite, const TargetLibraryInfo &TLI, AliasAnalysis &AA)
If &#39;Inst&#39; might be a self read (i.e.
#define op(i)
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:300
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1509
static void findUnconditionalPreds(SmallVectorImpl< BasicBlock *> &Blocks, BasicBlock *BB, DominatorTree *DT)
Find all blocks that will unconditionally lead to the block BB and append them to F...
PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM)
const CallInst * isFreeCall(const Value *I, const TargetLibraryInfo *TLI)
isFreeCall - Returns non-null if the value is a call to the builtin free()
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:269
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1135
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:221
AnalysisUsage & addRequired()
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Definition: BasicBlock.cpp:134
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition: PassSupport.h:51
bool isDef() const
Tests if this MemDepResult represents a query that is an instruction definition dependency.
const DataLayout & getDataLayout() const
Get the data layout for the module&#39;s target platform.
Definition: Module.cpp:371
bool isClobber() const
Tests if this MemDepResult represents a query that is an instruction clobber dependency.
static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA, MemoryDependenceResults *MD, const TargetLibraryInfo *TLI, InstOverlapIntervalsTy &IOL, DenseMap< Instruction *, size_t > *InstrOrdering)
Remove dead stores to stack-allocated locations in the function end block.
unsigned getDefaultBlockScanLimit() const
Some methods limit the number of instructions they will examine.
An analysis that produces MemoryDependenceResults for a function.
static MemoryLocation getLocForWrite(Instruction *Inst)
Return a Location stored to by the specified instruction.
bool remove(const value_type &X)
Remove an item from the set vector.
Definition: SetVector.h:158
This file implements a class to represent arbitrary precision integral constant values and operations...
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition: Constants.cpp:85
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:245
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:142
static Value * getStoredPointerOperand(Instruction *I)
Return the pointer that is being written to.
static MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset...
bool has(LibFunc F) const
Tests whether a library function is available.
std::map< int64_t, int64_t > OverlapIntervalsTy
static uint64_t getPointerSize(const Value *V, const DataLayout &DL, const TargetLibraryInfo &TLI, const Function *F)
An instruction for storing to memory.
Definition: Instructions.h:321
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree...
Definition: Dominators.h:145
unsigned getNumSuccessors() const
Return the number of successors that this instruction has.
Value * getOperand(unsigned i) const
Definition: User.h:170
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
Definition: SetVector.h:211
uint64_t getValue() const
const BasicBlock & getEntryBlock() const
Definition: Function.h:640
an instruction for type-safe pointer arithmetic to access elements of arrays and structs ...
Definition: Instructions.h:854
static bool runOnFunction(Function &F, bool PostInlining)
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:423
static bool isShortenableAtTheBeginning(Instruction *I)
Returns true if the beginning of this instruction can be safely shortened in length.
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:154
LLVM Basic Block Representation.
Definition: BasicBlock.h:58
static bool removePartiallyOverlappedStores(AliasAnalysis *AA, const DataLayout &DL, InstOverlapIntervalsTy &IOL)
static OverwriteResult isOverwrite(const MemoryLocation &Later, const MemoryLocation &Earlier, const DataLayout &DL, const TargetLibraryInfo &TLI, int64_t &EarlierOff, int64_t &LaterOff, Instruction *DepWrite, InstOverlapIntervalsTy &IOL, AliasAnalysis &AA, const Function *F)
Return 'OW_Complete' if a store to the 'Later' location completely overwrites a store to the 'Earlier' location, 'OW_End' if the end of the 'Earlier' location is completely overwritten by 'Later', or 'OW_Begin' if the beginning of the 'Earlier' location is overwritten by 'Later'.
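The interval arithmetic behind the OW_Complete case is simple; the standalone check below restates it (a sketch of the idea, not the function's actual body):

  // A later write [LaterOff, LaterOff + LaterSize) makes an earlier
  // write [EarlierOff, EarlierOff + EarlierSize) to the same object
  // dead when it covers every byte of it.
  static bool coversCompletely(int64_t EarlierOff, int64_t EarlierSize,
                               int64_t LaterOff, int64_t LaterSize) {
    return LaterOff <= EarlierOff &&
           LaterOff + LaterSize >= EarlierOff + EarlierSize;
  }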
This is an important base class in LLVM.
Definition: Constant.h:42
This file contains the declarations for the subclasses of Constant, which represent the different flavors of constant values that live in LLVM.
const Instruction & front() const
Definition: BasicBlock.h:281
A manager for alias analyses.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:371
bool mayThrow() const
Return true if this instruction may throw an exception.
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the pred_ and succ_ functions, and iterators.
Definition: Interval.h:113
static bool isShortenableAtTheEnd(Instruction *I)
Returns true if the end of this instruction can be safely shortened in length.
Represent the analysis usage information of a pass.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64-bit edition).
Definition: MathExtras.h:434
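Since the predicate is constexpr, its behavior can be pinned down at compile time (illustrative asserts):

  #include "llvm/Support/MathExtras.h"

  // A power of two > 0 has exactly one set bit; zero is rejected.
  static_assert(llvm::isPowerOf2_64(1) && llvm::isPowerOf2_64(64),
                "single set bit");
  static_assert(!llvm::isPowerOf2_64(0) && !llvm::isPowerOf2_64(6),
                "zero or multiple set bits");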
FunctionPass * createDeadStoreEliminationPass()
static bool memoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI, AliasAnalysis *AA)
Returns true if the memory which is accessed by the second instruction is not modified between the first and the second instruction.
std::underlying_type< E >::type Underlying(E Val)
Check that Val is in range for E, and return Val cast to E's underlying type.
Definition: BitmaskEnum.h:91
Analysis pass providing a never-invalidated alias analysis result.
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:285
LocationSize Size
The maximum size of the location, in address-units, or UnknownSize if the size is not known.
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:116
self_iterator getIterator()
Definition: ilist_node.h:82
INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false, false)
INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false, false)
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs, and aliases.
Definition: Value.cpp:529
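A typical use is comparing pointers modulo such sugar (hypothetical helper):

  #include "llvm/IR/Value.h"

  // Hypothetical: do A and B name the same pointer once bitcasts,
  // all-zero GEPs, and aliases are peeled away?
  bool sameUnderlyingPointer(const llvm::Value *A, const llvm::Value *B) {
    return A->stripPointerCasts() == B->stripPointerCasts();
  }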
This class represents the va_arg llvm instruction, which returns an argument of the specified type given a va_list and increments that list.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: PassManager.h:160
A wrapper analysis pass for the legacy pass manager that exposes a MemoryDependenceResults instance.
Value * GetUnderlyingObject(Value *V, const DataLayout &DL, unsigned MaxLookup=6)
This method strips off any GEP address adjustments and pointer casts from the specified value, returning the original object being addressed.
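DSE-style reasoning usually starts by asking what object a pointer is rooted in; a minimal sketch (hypothetical helper):

  #include "llvm/Analysis/ValueTracking.h"
  #include "llvm/IR/Instructions.h"

  // Hypothetical: is this pointer derived from a stack allocation?
  // Stores into a dead alloca are candidates for removal.
  bool pointsIntoAlloca(llvm::Value *Ptr, const llvm::DataLayout &DL) {
    return llvm::isa<llvm::AllocaInst>(llvm::GetUnderlyingObject(Ptr, DL));
  }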
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
A memory dependence query can return one of three different answers.
const Value * Ptr
The address of the start of the location.
Representation for a specific memory location.
static cl::opt< bool > EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking", cl::init(true), cl::Hidden, cl::desc("Enable partial-overwrite tracking in DSE"))
MemDepResult getPointerDependencyFrom(const MemoryLocation &Loc, bool isLoad, BasicBlock::iterator ScanIt, BasicBlock *BB, Instruction *QueryInst=nullptr, unsigned *Limit=nullptr)
Returns the instruction on which a memory location depends.
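A sketch of a typical query, scanning backwards from a store within its own basic block (hypothetical wrapper around the API above):

  #include "llvm/Analysis/MemoryDependenceAnalysis.h"
  #include "llvm/IR/Instructions.h"

  // Hypothetical: what does the location written by SI depend on,
  // scanning backwards from SI inside its block?
  llvm::MemDepResult writeDepBefore(llvm::MemoryDependenceResults &MD,
                                    llvm::StoreInst *SI,
                                    const llvm::MemoryLocation &Loc) {
    return MD.getPointerDependencyFrom(Loc, /*isLoad=*/false,
                                       SI->getIterator(), SI->getParent());
  }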
static void deleteDeadInstruction(Instruction *I, BasicBlock::iterator *BBI, MemoryDependenceResults &MD, const TargetLibraryInfo &TLI, InstOverlapIntervalsTy &IOL, DenseMap< Instruction *, size_t > *InstrOrdering, SmallSetVector< Value *, 16 > *ValueSet=nullptr)
Delete this instruction.
static cl::opt< bool > EnablePartialStoreMerging("enable-dse-partial-store-merging", cl::init(true), cl::Hidden, cl::desc("Enable partial store merging in DSE"))
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:298
Iterator for intrusive lists based on ilist_node.
unsigned getNumOperands() const
Definition: User.h:192
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:418
static void removeAccessedObjects(const MemoryLocation &LoadedLoc, SmallSetVector< Value *, 16 > &DeadStackObjects, const DataLayout &DL, AliasAnalysis *AA, const TargetLibraryInfo *TLI, const Function *F)
Check to see if the specified location may alias any of the stack objects in the DeadStackObjects set. If so, they become live because the location is being loaded.
iterator end()
Definition: BasicBlock.h:271
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:847
Module.h: This file contains the declarations for the Module class.
Provides information about what library functions are available for the current target.
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:381
static bool hasAnalyzableMemoryWrite(Instruction *I, const TargetLibraryInfo &TLI)
Does this instruction write some memory? This only returns true for things that we can analyze with other helpers below.
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:622
void setPreservesCFG()
This function should be called by a pass iff it does not: (1) add or remove basic blocks from the function, or (2) modify terminator instructions in any way.
Definition: Pass.cpp:286
static bool eliminateNoopStore(Instruction *Inst, BasicBlock::iterator &BBI, AliasAnalysis *AA, MemoryDependenceResults *MD, const DataLayout &DL, const TargetLibraryInfo *TLI, InstOverlapIntervalsTy &IOL, DenseMap< Instruction *, size_t > *InstrOrdering)
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
Definition: Function.cpp:1437
void setOperand(unsigned i, Value *Val)
Definition: User.h:175
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:133
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not have a module.
Definition: Instruction.cpp:56
Class for arbitrary precision integers.
Definition: APInt.h:70
bool isMustAlias(const MemoryLocation &LocA, const MemoryLocation &LocB)
A trivial helper function to check to see if the specified pointers are must-alias.
Represents analyses that only rely on functions' control flow.
Definition: PassManager.h:115
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
Definition: APInt.h:607
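Partial-store merging reasons in terms of bit masks, for which this constructor is a natural fit (illustrative values, not taken from the file):

  #include "llvm/ADT/APInt.h"

  // Bits [loBit, hiBit) are set, the rest cleared: here the low two
  // bytes of a 32-bit mask, i.e. 0x0000FFFF.
  llvm::APInt LowTwoBytes = llvm::APInt::getBitsSet(32, 0, 16);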
LLVM_NODISCARD bool isModSet(const ModRefInfo MRI)
static bool tryToShortenBegin(Instruction *EarlierWrite, OverlapIntervalsTy &IntervalMap, int64_t &EarlierStart, int64_t &EarlierSize)
bool doesNotAccessMemory(const CallBase *Call)
Checks if the specified call is known to never read or write memory.
Instruction * getInst() const
If this is a normal dependency, returns the instruction that is depended on.
static MemoryLocation getLocForRead(Instruction *Inst, const TargetLibraryInfo &TLI)
Return the location read by the specified "hasAnalyzableMemoryWrite" instruction, if any.
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:56
This file provides utility analysis objects describing memory locations.
void preserveSet()
Mark an analysis set as preserved.
Definition: PassManager.h:190
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:107
bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast=false)
Tests if a value is a call or invoke to a library function that allocates zero-filled memory (such as calloc).
bool empty() const
Determine if the SetVector is empty or not.
Definition: SetVector.h:73
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast=false)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc, calloc, or strdup like).
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:323
void preserve()
Mark an analysis as preserved.
Definition: PassManager.h:175
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exists.
Definition: DenseMap.h:211
Analysis pass providing the TargetLibraryInfo.
void GetUnderlyingObjects(Value *V, SmallVectorImpl< Value *> &Objects, const DataLayout &DL, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to GetUnderlyingObject except that it can look through phi and select instructions and return multiple objects.
static GetElementPtrInst * CreateInBounds(Value *Ptr, ArrayRef< Value *> IdxList, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Definition: Instructions.h:914
static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA, MemoryDependenceResults *MD, DominatorTree *DT, const TargetLibraryInfo *TLI)
const BasicBlock & front() const
Definition: Function.h:663
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction has no side effects.
Definition: Local.cpp:349
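A sketch of the usual delete-if-dead pattern built on this predicate (hypothetical helper; the pass's own deleteDeadInstruction additionally updates its dependence and interval bookkeeping):

  #include "llvm/Analysis/TargetLibraryInfo.h"
  #include "llvm/Transforms/Utils/Local.h"

  // Hypothetical: erase I when its result is unused and it has no
  // side effects; returns whether anything was deleted.
  bool eraseIfTriviallyDead(llvm::Instruction *I,
                            const llvm::TargetLibraryInfo *TLI) {
    if (!llvm::isInstructionTriviallyDead(I, TLI))
      return false;
    I->eraseFromParent();
    return true;
  }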
LLVM Value Representation.
Definition: Value.h:73
void removeInstruction(Instruction *InstToRemove)
Removes an instruction from the dependence analysis, updating the dependence of instructions that previously depended on it.
bool remove_if(UnaryPredicate P)
Remove items from the set vector based on a predicate function.
Definition: SetVector.h:200
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:81
static bool isRemovable(Instruction *I)
If the value of this instruction and the memory it writes to is unused, may we delete this instruction?
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition: StringRef.h:49
Various options to control the behavior of getObjectSize.
A container for analyses that lazily runs them and caches their results.
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:260
static bool isVolatile(Instruction *Inst)
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
This header defines various interfaces for pass management in LLVM.
bool isBigEndian() const
Definition: DataLayout.h:222
ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc)
getModRefInfo (for call sites) - Return information about whether a particular call site modifies or reads the specified memory location.
#define LLVM_DEBUG(X)
Definition: Debug.h:123
Value * getPointerOperand()
Definition: Instructions.h:413
bool use_empty() const
Definition: Value.h:323
static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierOffset, int64_t &EarlierSize, int64_t LaterOffset, int64_t LaterSize, bool IsOverwriteEnd)
iterator_range< arg_iterator > args()
Definition: Function.h:689
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:44
const BasicBlock * getParent() const
Definition: Instruction.h:67
static bool tryToShortenEnd(Instruction *EarlierWrite, OverlapIntervalsTy &IntervalMap, int64_t &EarlierStart, int64_t &EarlierSize)
MemDepResult getDependency(Instruction *QueryInst)
Returns the instruction on which a memory operation depends.
bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, bool StoreCaptures, unsigned MaxUsesToExplore=DefaultMaxUsesToExplore)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (which is required to exist).
static MemoryLocation getForSource(const MemTransferInst *MTI)
Return a location representing the source of a memory transfer.
LLVM_NODISCARD bool isRefSet(const ModRefInfo MRI)
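isModSet and isRefSet decode the ModRefInfo bitmask returned by queries such as getModRefInfo above; a minimal sketch (hypothetical helper):

  #include "llvm/Analysis/AliasAnalysis.h"

  // Hypothetical: classify what a call may do to a memory location.
  void classifyCallEffect(llvm::AliasAnalysis &AA,
                          const llvm::CallBase *Call,
                          const llvm::MemoryLocation &Loc,
                          bool &MayRead, bool &MayWrite) {
    llvm::ModRefInfo MRI = AA.getModRefInfo(Call, Loc);
    MayRead = llvm::isRefSet(MRI);
    MayWrite = llvm::isModSet(MRI);
  }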