67 #define DEBUG_TYPE "early-cse" 69 STATISTIC(NumSimplify,
"Number of instructions simplified or DCE'd");
70 STATISTIC(NumCSE,
"Number of instructions CSE'd");
71 STATISTIC(NumCSECVP,
"Number of compare instructions CVP'd");
72 STATISTIC(NumCSELoad,
"Number of load instructions CSE'd");
73 STATISTIC(NumCSECall,
"Number of call instructions CSE'd");
74 STATISTIC(NumDSE,
"Number of trivial dead stores removed");
77 "Controls which instructions are removed");
100 if (
CallInst *CI = dyn_cast<CallInst>(Inst))
101 return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy();
102 return isa<CastInst>(Inst) || isa<BinaryOperator>(Inst) ||
103 isa<GetElementPtrInst>(Inst) || isa<CmpInst>(Inst) ||
104 isa<SelectInst>(Inst) || isa<ExtractElementInst>(Inst) ||
105 isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst) ||
106 isa<ExtractValueInst>(Inst) || isa<InsertValueInst>(Inst);
123 static unsigned getHashValue(SimpleValue Val);
124 static bool isEqual(SimpleValue LHS, SimpleValue RHS);
133 Value *LHS = BinOp->getOperand(0);
134 Value *RHS = BinOp->getOperand(1);
135 if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1))
141 if (
CmpInst *CI = dyn_cast<CmpInst>(Inst)) {
142 Value *LHS = CI->getOperand(0);
143 Value *RHS = CI->getOperand(1);
147 Pred = CI->getSwappedPredicate();
170 if (
CastInst *CI = dyn_cast<CastInst>(Inst))
171 return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0));
174 return hash_combine(EVI->getOpcode(), EVI->getOperand(0),
178 return hash_combine(IVI->getOpcode(), IVI->getOperand(0),
182 assert((isa<CallInst>(Inst) || isa<BinaryOperator>(Inst) ||
183 isa<GetElementPtrInst>(Inst) || isa<SelectInst>(Inst) ||
184 isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
185 isa<ShuffleVectorInst>(Inst)) &&
186 "Invalid/unknown instruction");
197 if (LHS.isSentinel() || RHS.isSentinel())
200 if (LHSI->
getOpcode() != RHSI->getOpcode())
207 if (!LHSBinOp->isCommutative())
210 assert(isa<BinaryOperator>(RHSI) &&
211 "same opcode, but different instruction type?");
216 LHSBinOp->getOperand(1) == RHSBinOp->
getOperand(0);
218 if (
CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) {
219 assert(isa<CmpInst>(RHSI) &&
220 "same opcode, but different instruction type?");
221 CmpInst *RHSCmp = cast<CmpInst>(RHSI);
224 LHSCmp->getOperand(1) == RHSCmp->
getOperand(0) &&
225 LHSCmp->getSwappedPredicate() == RHSCmp->
getPredicate();
241 return LHSA == RHSA && LHSB == RHSB;
242 return ((LHSA == RHSA && LHSB == RHSB) ||
243 (LHSA == RHSB && LHSB == RHSA));
295 static unsigned getHashValue(CallValue Val);
296 static bool isEqual(CallValue LHS, CallValue RHS);
311 if (LHS.isSentinel() || RHS.isSentinel())
337 std::unique_ptr<MemorySSAUpdater> MSSAUpdater;
352 ScopedHTType AvailableValues;
370 unsigned Generation = 0;
372 bool IsAtomic =
false;
374 LoadValue() =
default;
375 LoadValue(
Instruction *Inst,
unsigned Generation,
unsigned MatchingId,
377 : DefInst(Inst), Generation(Generation), MatchingId(MatchingId),
378 IsAtomic(IsAtomic) {}
381 using LoadMapAllocator =
388 LoadHTType AvailableLoads;
393 using InvariantMapAllocator =
396 using InvariantHTType =
398 InvariantMapAllocator>;
399 InvariantHTType AvailableInvariants;
407 CallHTType AvailableCalls;
410 unsigned CurrentGeneration = 0;
416 : TLI(TLI), TTI(TTI), DT(DT), AC(AC), SQ(DL, &TLI, &DT, &AC), MSSA(MSSA),
417 MSSAUpdater(llvm::make_unique<MemorySSAUpdater>(MSSA)) {}
427 NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
428 InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls)
429 : Scope(AvailableValues), LoadScope(AvailableLoads),
430 InvariantScope(AvailableInvariants), CallScope(AvailableCalls) {}
431 NodeScope(
const NodeScope &) =
delete;
432 NodeScope &operator=(
const NodeScope &) =
delete;
435 ScopedHTType::ScopeTy Scope;
436 LoadHTType::ScopeTy LoadScope;
437 InvariantHTType::ScopeTy InvariantScope;
438 CallHTType::ScopeTy CallScope;
447 StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
448 InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls,
451 : CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child),
453 Scopes(AvailableValues, AvailableLoads, AvailableInvariants,
456 StackNode(
const StackNode &) =
delete;
457 StackNode &operator=(
const StackNode &) =
delete;
460 unsigned currentGeneration() {
return CurrentGeneration; }
461 unsigned childGeneration() {
return ChildGeneration; }
473 bool isProcessed() {
return Processed; }
474 void process() { Processed =
true; }
477 unsigned CurrentGeneration;
478 unsigned ChildGeneration;
483 bool Processed =
false;
488 class ParseMemoryInst {
494 IsTargetMemInst =
true;
498 if (IsTargetMemInst)
return Info.ReadMem;
499 return isa<LoadInst>(Inst);
503 if (IsTargetMemInst)
return Info.WriteMem;
504 return isa<StoreInst>(Inst);
513 bool isUnordered()
const {
515 return Info.isUnordered();
517 if (
LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
518 return LI->isUnordered();
519 }
else if (
StoreInst *
SI = dyn_cast<StoreInst>(Inst)) {
520 return SI->isUnordered();
528 return Info.IsVolatile;
530 if (
LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
531 return LI->isVolatile();
532 }
else if (
StoreInst *
SI = dyn_cast<StoreInst>(Inst)) {
533 return SI->isVolatile();
539 bool isInvariantLoad()
const {
540 if (
auto *LI = dyn_cast<LoadInst>(Inst))
545 bool isMatchingMemLoc(
const ParseMemoryInst &Inst)
const {
547 getMatchingId() == Inst.getMatchingId());
556 int getMatchingId()
const {
557 if (IsTargetMemInst)
return Info.MatchingId;
562 if (IsTargetMemInst)
return Info.PtrVal;
566 bool mayReadFromMemory()
const {
567 if (IsTargetMemInst)
return Info.ReadMem;
568 return Inst->mayReadFromMemory();
571 bool mayWriteToMemory()
const {
572 if (IsTargetMemInst)
return Info.WriteMem;
573 return Inst->mayWriteToMemory();
577 bool IsTargetMemInst =
false;
588 if (
auto *LI = dyn_cast<LoadInst>(Inst))
590 if (
auto *
SI = dyn_cast<StoreInst>(Inst))
591 return SI->getValueOperand();
592 assert(isa<IntrinsicInst>(Inst) &&
"Instruction not supported");
599 bool isOperatingOnInvariantMemAt(
Instruction *I,
unsigned GenAt);
601 bool isSameMemGeneration(
unsigned EarlierGeneration,
unsigned LaterGeneration,
623 for (
unsigned I = 0; I < WorkQueue.
size(); ++
I) {
626 for (
auto *U : WI->
users())
627 if (
MemoryPhi *MP = dyn_cast<MemoryPhi>(U))
630 MSSAUpdater->removeMemoryAccess(WI);
635 [=](
Use &
In) {
return In == FirstIn; }))
662 bool EarlyCSE::isSameMemGeneration(
unsigned EarlierGeneration,
663 unsigned LaterGeneration,
667 if (EarlierGeneration == LaterGeneration)
680 auto *EarlierMA = MSSA->getMemoryAccess(EarlierInst);
683 auto *LaterMA = MSSA->getMemoryAccess(LaterInst);
692 MSSA->getWalker()->getClobberingMemoryAccess(LaterInst);
693 return MSSA->dominates(LaterDef, EarlierMA);
696 bool EarlyCSE::isOperatingOnInvariantMemAt(
Instruction *I,
unsigned GenAt) {
699 if (
auto *LI = dyn_cast<LoadInst>(I))
709 if (!AvailableInvariants.count(MemLoc))
714 return AvailableInvariants.lookup(MemLoc) <= GenAt;
717 bool EarlyCSE::handleBranchCondition(
Instruction *CondInst,
728 return BOp->getOpcode() == Opcode;
734 unsigned PropagateOpcode =
735 (BI->
getSuccessor(0) == BB) ? Instruction::And : Instruction::Or;
737 bool MadeChanges =
false;
741 while (!WorkList.
empty()) {
744 AvailableValues.insert(Curr, TorF);
746 << Curr->
getName() <<
"' as " << *TorF <<
" in " 759 if (MatchBinOp(Curr, PropagateOpcode))
760 for (
auto &
Op : cast<BinaryOperator>(Curr)->operands())
762 if (SimpleValue::canHandle(OPI) && Visited.
insert(OPI).second)
770 bool Changed =
false;
792 if (CondInst && SimpleValue::canHandle(CondInst))
793 Changed |= handleBranchCondition(CondInst, BI, BB, Pred);
828 if (
match(Inst, m_Intrinsic<Intrinsic::assume>())) {
831 if (CondI && SimpleValue::canHandle(CondI)) {
836 LLVM_DEBUG(
dbgs() <<
"EarlyCSE skipping assumption: " << *Inst <<
'\n');
841 if (
match(Inst, m_Intrinsic<Intrinsic::sideeffect>())) {
842 LLVM_DEBUG(
dbgs() <<
"EarlyCSE skipping sideeffect: " << *Inst <<
'\n');
859 if (
match(Inst, m_Intrinsic<Intrinsic::invariant_start>())) {
863 auto *CI = cast<CallInst>(Inst);
866 if (!AvailableInvariants.count(MemLoc))
867 AvailableInvariants.insert(MemLoc, CurrentGeneration);
873 dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0))) {
874 if (SimpleValue::canHandle(CondI)) {
876 if (
auto *KnownCond = AvailableValues.lookup(CondI)) {
878 if (isa<ConstantInt>(KnownCond) &&
879 cast<ConstantInt>(KnownCond)->isOne()) {
881 <<
"EarlyCSE removing guard: " << *Inst <<
'\n');
888 cast<CallInst>(Inst)->setArgOperand(0, KnownCond);
906 LLVM_DEBUG(
dbgs() <<
"EarlyCSE Simplify: " << *Inst <<
" to: " << *V
930 if (SimpleValue::canHandle(Inst)) {
932 if (
Value *V = AvailableValues.lookup(Inst)) {
939 if (
auto *I = dyn_cast<Instruction>(V))
950 AvailableValues.insert(Inst, Inst);
954 ParseMemoryInst MemInst(Inst, TTI);
956 if (MemInst.isValid() && MemInst.isLoad()) {
959 if (MemInst.isVolatile() || !MemInst.isUnordered()) {
964 if (MemInst.isInvariantLoad()) {
971 if (!AvailableInvariants.count(MemLoc))
972 AvailableInvariants.insert(MemLoc, CurrentGeneration);
982 LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
983 if (InVal.DefInst !=
nullptr &&
984 InVal.MatchingId == MemInst.getMatchingId() &&
986 !MemInst.isVolatile() && MemInst.isUnordered() &&
988 InVal.IsAtomic >= MemInst.isAtomic() &&
989 (isOperatingOnInvariantMemAt(Inst, InVal.Generation) ||
990 isSameMemGeneration(InVal.Generation, CurrentGeneration,
991 InVal.DefInst, Inst))) {
995 <<
" to: " << *InVal.DefInst <<
'\n');
1011 AvailableLoads.insert(
1012 MemInst.getPointerOperand(),
1013 LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
1014 MemInst.isAtomic()));
1015 LastStore =
nullptr;
1026 !(MemInst.isValid() && !MemInst.mayReadFromMemory()))
1027 LastStore =
nullptr;
1030 if (CallValue::canHandle(Inst)) {
1033 std::pair<Instruction *, unsigned> InVal = AvailableCalls.lookup(Inst);
1034 if (InVal.first !=
nullptr &&
1035 isSameMemGeneration(InVal.second, CurrentGeneration, InVal.first,
1038 <<
" to: " << *InVal.first <<
'\n');
1053 AvailableCalls.insert(
1054 Inst, std::pair<Instruction *, unsigned>(Inst, CurrentGeneration));
1063 if (
FenceInst *FI = dyn_cast<FenceInst>(Inst))
1074 if (MemInst.isValid() && MemInst.isStore()) {
1075 LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
1076 if (InVal.DefInst &&
1077 InVal.DefInst == getOrCreateResult(Inst, InVal.DefInst->getType()) &&
1078 InVal.MatchingId == MemInst.getMatchingId() &&
1080 !MemInst.isVolatile() && MemInst.isUnordered() &&
1081 (isOperatingOnInvariantMemAt(Inst, InVal.Generation) ||
1082 isSameMemGeneration(InVal.Generation, CurrentGeneration,
1083 InVal.DefInst, Inst))) {
1090 MemInst.getPointerOperand() ||
1092 "can't have an intervening store if not using MemorySSA!");
1093 LLVM_DEBUG(
dbgs() <<
"EarlyCSE DSE (writeback): " << *Inst <<
'\n');
1112 ++CurrentGeneration;
1114 if (MemInst.isValid() && MemInst.isStore()) {
1123 ParseMemoryInst LastStoreMemInst(LastStore, TTI);
1124 assert(LastStoreMemInst.isUnordered() &&
1125 !LastStoreMemInst.isVolatile() &&
1126 "Violated invariant");
1127 if (LastStoreMemInst.isMatchingMemLoc(MemInst)) {
1129 <<
" due to: " << *Inst <<
'\n');
1133 removeMSSA(LastStore);
1137 LastStore =
nullptr;
1148 AvailableLoads.insert(
1149 MemInst.getPointerOperand(),
1150 LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
1151 MemInst.isAtomic()));
1160 if (MemInst.isUnordered() && !MemInst.isVolatile())
1163 LastStore =
nullptr;
1171 bool EarlyCSE::run() {
1177 std::deque<StackNode *> nodesToProcess;
1179 bool Changed =
false;
1182 nodesToProcess.push_back(
new StackNode(
1183 AvailableValues, AvailableLoads, AvailableInvariants, AvailableCalls,
1184 CurrentGeneration, DT.getRootNode(),
1185 DT.getRootNode()->begin(), DT.getRootNode()->end()));
1188 unsigned LiveOutGeneration = CurrentGeneration;
1191 while (!nodesToProcess.empty()) {
1194 StackNode *NodeToProcess = nodesToProcess.back();
1197 CurrentGeneration = NodeToProcess->currentGeneration();
1200 if (!NodeToProcess->isProcessed()) {
1202 Changed |= processNode(NodeToProcess->node());
1203 NodeToProcess->childGeneration(CurrentGeneration);
1204 NodeToProcess->process();
1205 }
else if (NodeToProcess->childIter() != NodeToProcess->end()) {
1208 nodesToProcess.push_back(
1209 new StackNode(AvailableValues, AvailableLoads, AvailableInvariants,
1210 AvailableCalls, NodeToProcess->childGeneration(),
1211 child, child->
begin(), child->
end()));
1215 delete NodeToProcess;
1216 nodesToProcess.pop_back();
1221 CurrentGeneration = LiveOutGeneration;
1257 template<
bool UseMemorySSA>
1270 if (skipFunction(F))
1273 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
1274 auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1275 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1276 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1278 UseMemorySSA ? &getAnalysis<MemorySSAWrapperPass>().getMSSA() :
nullptr;
1314 using EarlyCSEMemSSALegacyPass =
1315 EarlyCSELegacyCommonPass<
true>;
1318 char EarlyCSEMemSSALegacyPass::
ID = 0;
1322 return new EarlyCSEMemSSALegacyPass();
1328 "Early CSE w/ MemorySSA",
false,
false)
Legacy wrapper pass to provide the GlobalsAAResult object.
void initializeEarlyCSELegacyPassPass(PassRegistry &)
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
A parsed version of the target data layout string, and methods for querying it.
const_iterator end(StringRef path)
Get end iterator over path.
static ConstantInt * getFalse(LLVMContext &Context)
static SimpleValue getTombstoneKey()
This class is the base class for the comparison instructions.
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at application startup.
Value * getPointerOperand(Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
Atomic ordering constants.
bool VerifyMemorySSA
Enables verification of MemorySSA.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
This class represents lattice values for constants.
bool isAtomic() const
Return true if this instruction has an AtomicOrdering of unordered or higher.
This is the interface for a simple mod/ref and alias analysis over globals.
An instruction for ordering other memory operations.
void push_back(const T &Elt)
value_op_iterator value_op_begin()
This class represents a function call, abstracting a target machine's calling convention.
An immutable pass that tracks lazily created AssumptionCache objects.
bool mayWriteToMemory() const
Return true if this instruction may modify memory.
A cache of @llvm.assume calls within a function.
Analysis pass providing the TargetTransformInfo.
bool salvageDebugInfo(Instruction &I)
Assuming the instruction I is going to be deleted, attempt to salvage debug users of I by writing the...
static CallValue getTombstoneKey()
bool replaceDbgUsesWithUndef(Instruction *I)
Replace all the uses of an SSA value in .dbg intrinsics with undef.
value_op_iterator value_op_end()
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly...
BasicBlock * getSuccessor(unsigned i) const
STATISTIC(NumFunctions, "Total number of functions")
Analysis pass which computes a DominatorTree.
block Block Frequency true
An instruction for reading from memory.
Value * getCondition() const
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well formed.
This defines the Use class.
static Optional< MemoryLocation > getOrNone(const Instruction *Inst)
unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge)
Replace each use of 'From' with 'To' if that use is dominated by the given edge.
LLVMContext & getContext() const
Get the context in which this basic block lives.
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Run the pass over the function.
This file defines the MallocAllocator and BumpPtrAllocator interfaces.
iterator begin()
Instruction iterator methods.
bool isIdenticalTo(const Instruction *I) const
Return true if the specified instruction is exactly identical to the current one. ...
bool match(Val *V, const Pattern &P)
AnalysisUsage & addRequired()
#define INITIALIZE_PASS_DEPENDENCY(depName)
Legacy analysis pass which computes MemorySSA.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
This is the base class for all instructions that perform data casts.
A Use represents the edge between a Value definition and its users.
separate const offset from Split GEPs to a variadic base and a constant offset for better CSE
Encapsulates MemorySSA, including all data associated with memory accesses.
static bool isLoad(int Opcode)
static CallValue getEmptyKey()
RecyclingAllocator - This class wraps an Allocator, adding the functionality of recycling deleted objects.
static MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx, const TargetLibraryInfo *TLI)
Return a location representing a particular argument of a call.
This file provides an implementation of debug counters.
static void cse(BasicBlock *BB)
Perform cse of induction variable instructions.
Type * getType() const
All values are typed, get the type of this value.
bool insert(const value_type &X)
Insert a new element into the SetVector.
MemoryUseOrDef * getMemoryAccess(const Instruction *I) const
Given a memory Mod/Ref'ing instruction, get the MemorySSA access associated with it.
static bool isEqual(const Function &Caller, const Function &Callee)
This file provides the interface for a simple, fast CSE pass.
void andIRFlags(const Value *V)
Logical 'and' of any supported wrapping, exact, and fast-math flags of V and this instruction...
static bool isStore(int Opcode)
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Value * getLoadStorePointerOperand(Value *V)
A helper function that returns the pointer operand of a load or store instruction.
An instruction for storing to memory.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Optimize for code generation
INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false) using EarlyCSEMemSSALegacyPass
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree...
Value * getOperand(unsigned i) const
Analysis containing CSE Info
bool isVoidTy() const
Return true if this is 'void'.
BumpPtrAllocatorImpl BumpPtrAllocator
The standard BumpPtrAllocator which just uses the default template parameters.
static bool runOnFunction(Function &F, bool PostInlining)
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
A set of analyses that are preserved following a run of a transformation pass.
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
bool isIdenticalToWhenDefined(const Instruction *I) const
This is like isIdenticalTo, except that it ignores the SubclassOptionalData flags, which may specify conditions under which the instruction's result is undefined.
LLVM Basic Block Representation.
The instances of the Type class are immutable: once they are created, they are never changed...
Conditional or Unconditional Branch instruction.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static SimpleValue getEmptyKey()
This file contains the declarations for the subclasses of Constant, which represent the different flavors of constant values.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool mayThrow() const
Return true if this instruction may throw an exception.
Represent the analysis usage information of a pass.
Analysis pass providing a never-invalidated alias analysis result.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
FunctionPass class - This class is used to implement most global optimizations.
static bool shouldExecute(unsigned CounterName)
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
static bool isAtomic(Instruction *I)
INITIALIZE_PASS_END(RegBankSelect, DEBUG_TYPE, "Assign register bank of generic virtual registers", false, false) RegBankSelect
Representation for a specific memory location.
A function analysis which provides an AssumptionCache.
A SetVector that performs no allocations if smaller than a certain size.
Iterator for intrusive lists based on ilist_node.
SelectPatternFlavor Flavor
void verifyMemorySSA() const
Verify that MemorySSA is self consistent (IE definitions dominate all uses, uses appear in the right place, and so forth).
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
SelectPatternFlavor
Specific patterns of select instructions we can match.
Provides information about what library functions are available for the current target.
An analysis that produces MemorySSA for a function.
LLVM_NODISCARD T pop_back_val()
bool isConditional() const
void setPreservesCFG()
This function should be called by the pass, iff they do not:
static ConstantInt * getTrue(LLVMContext &Context)
bool isGuard(const User *U)
Returns true iff U has semantics of a guard.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
iterator_range< user_iterator > users()
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Represents analyses that only rely on functions' control flow.
Predicate getPredicate() const
Return the predicate for this instruction.
LLVM_NODISCARD bool empty() const
void preserveSet()
Mark an analysis set as preserved.
StringRef getName() const
Return a constant reference to the value's name.
bool onlyReadsMemory(unsigned OpNo) const
bool mayReadFromMemory() const
Return true if this instruction may read memory.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
void preserve()
Mark an analysis as preserved.
DEBUG_COUNTER(CSECounter, "early-cse", "Controls which instructions are removed")
EarlyCSELegacyCommonPass< false > EarlyCSELegacyPass
Analysis pass providing the TargetLibraryInfo.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isSentinel(const DWARFDebugNames::AttributeEncoding &AE)
Module * getParent()
Get the module that this global value is contained inside of...
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction has no side effects.
LLVM Value Representation.
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
typename std::vector< DomTreeNodeBase *>::iterator iterator
void initializeEarlyCSEMemSSALegacyPassPass(PassRegistry &)
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/def graph.
FunctionPass * createEarlyCSEPass(bool UseMemorySSA=false)
A container for analyses that lazily runs them and caches their results.
Legacy analysis pass which computes a DominatorTree.
static bool isVolatile(Instruction *Inst)
Represents phi nodes for memory accesses.
This header defines various interfaces for pass management in LLVM.
Value * SimplifyInstruction(Instruction *I, const SimplifyQuery &Q, OptimizationRemarkEmitter *ORE=nullptr)
See if we can compute a simplified version of this instruction.
Information about a load/store intrinsic defined by the target.
A wrapper class for inspecting calls to intrinsic functions.
This instruction inserts a struct field of array element value into an aggregate value.