31 #define DEBUG_TYPE "instcombine"
33 STATISTIC(NumDeadStore,    "Number of dead stores eliminated");
34 STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");
41 return GV->isConstant();
44 if (CE->getOpcode() == Instruction::BitCast ||
45 CE->getOpcode() == Instruction::AddrSpaceCast ||
46 CE->getOpcode() == Instruction::GetElementPtr)
68   while (!ValuesToInspect.empty()) {
70     const bool IsOffset = ValuePair.second;
71     for (auto &U : ValuePair.first->uses()) {
72       auto *I = cast<Instruction>(U.getUser());
74       if (auto *LI = dyn_cast<LoadInst>(I)) {
76         if (!LI->isSimple()) return false;
80       if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
85       if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
98 unsigned DataOpNo = CS.getDataOperandNo(&U);
99 bool IsArgOperand = CS.isArgOperand(&U);
102 if (IsArgOperand && CS.isInAllocaArgument(DataOpNo))
108 if (CS.onlyReadsMemory() &&
109 (CS.getInstruction()->use_empty() || CS.doesNotCapture(DataOpNo)))
114 if (IsArgOperand && CS.isByValArgument(DataOpNo))
119       if (I->isLifetimeStartOrEnd()) {
120         assert(I->use_empty() && "Lifetime markers have no result to use!");
133         if (U.getOperandNo() == 1) {
139           if (TheCopy) return false;
143           if (IsOffset) return false;
146           if (U.getOperandNo() != 0) return false;
180 APInt(64, AllocaSize), DL);
198   if (C->getValue().getActiveBits() <= 64) {
207 while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
215 Value *Idx[2] = {NullIdx, NullIdx};
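The excerpt around lines 180–215 comes from isDereferenceableForAllocaSize and simplifyAllocaArraySize: an alloca with a constant array count is rewritten as a single allocation of an array type plus a zero-index GEP that takes over the original uses. A hand-written IR sketch of that intent (not verbatim pass output; names are illustrative):

  define i32 @f(i32 %v, i64 %i) {
    %buf = alloca i32, i32 4, align 4
    %p = getelementptr inbounds i32, i32* %buf, i64 %i
    store i32 %v, i32* %p, align 4
    %r = load i32, i32* %buf, align 4
    ret i32 %r
  }

  ; After simplifyAllocaArraySize (sketch): one array allocation, and uses of the
  ; old pointer go through a zero-index GEP built from Idx = {NullIdx, NullIdx}.
  ;   %buf = alloca [4 x i32], align 4
  ;   %buf.sub = getelementptr inbounds [4 x i32], [4 x i32]* %buf, i64 0, i64 0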
252 class PointerReplacer {
268 void PointerReplacer::findLoadAndReplace(Instruction &I) {
269   for (auto U : I.users()) {
274 if (isa<LoadInst>(Inst)) {
278     } else if (isa<GetElementPtrInst>(Inst) || isa<BitCastInst>(Inst)) {
279 Path.push_back(Inst);
280 findLoadAndReplace(*Inst);
288 Value *PointerReplacer::getReplacement(Value *V) {
289 auto Loc = WorkMap.find(V);
290 if (Loc != WorkMap.end())
296 if (getReplacement(I))
299   if (auto *LT = dyn_cast<LoadInst>(I)) {
300     auto *V = getReplacement(LT->getPointerOperand());
301     assert(V && "Operand not replaced");
304     IC.InsertNewInstWith(NewI, *LT);
305     IC.replaceInstUsesWith(*LT, NewI);
307   } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
308     auto *V = getReplacement(GEP->getPointerOperand());
309     assert(V && "Operand not replaced");
314     IC.InsertNewInstWith(NewI, *GEP);
317   } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
318     auto *V = getReplacement(BC->getOperand(0));
319     assert(V && "Operand not replaced");
323 IC.InsertNewInstWith(NewI, *BC);
333   auto *PT = cast<PointerType>(I.getType());
334   auto *NT = cast<PointerType>(V->getType());
335 assert(PT != NT && PT->getElementType() == NT->getElementType() &&
339 findLoadAndReplace(I);
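PointerReplacer (class at line 252, methods above) exists for the case where visitAllocaInst wants to forward an alloca's users to a copy source in a different address space: a plain RAUW would leave mismatched pointer types, so the loads, GEPs and bitcasts rooted at the alloca are rebuilt against the new pointer instead. A hand-written IR sketch of the shape involved, assuming an addrspace(4) constant global as the copy source (types, names and the resulting form are illustrative, not actual pass output):

  @g = addrspace(4) constant [4 x float] [float 1.0, float 2.0, float 3.0, float 4.0]

  declare void @llvm.memcpy.p0i8.p4i8.i64(i8* nocapture writeonly, i8 addrspace(4)* nocapture readonly, i64, i1)

  define float @read(i64 %i) {
    %buf = alloca [4 x float], align 4
    %dst = bitcast [4 x float]* %buf to i8*
    call void @llvm.memcpy.p0i8.p4i8.i64(i8* %dst,
          i8 addrspace(4)* bitcast ([4 x float] addrspace(4)* @g to i8 addrspace(4)*),
          i64 16, i1 false)
    %p = getelementptr inbounds [4 x float], [4 x float]* %buf, i64 0, i64 %i
    %v = load float, float* %p, align 4
    ret float %v
  }

  ; After the rewrite (sketch): the alloca and memcpy go away and the GEP/load are
  ; recreated with addrspace(4) pointer types against @g:
  ;   %p = getelementptr inbounds [4 x float], [4 x float] addrspace(4)* @g, i64 0, i64 %i
  ;   %v = load float, float addrspace(4)* %p, align 4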
366 if (FirstInst != &AI) {
390 return replaceInstUsesWith(AI, EntryAI);
405         Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
406     if (AI.getAlignment() <= SourceAlign &&
408       LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
410       for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
411 eraseInstFromFunction(*ToDelete[i]);
412 Constant *TheSrc = cast<Constant>(Copy->getSource());
413     auto *SrcTy = TheSrc->getType();
415 SrcTy->getPointerAddressSpace());
418 if (AI.getType()->getPointerAddressSpace() ==
419 SrcTy->getPointerAddressSpace()) {
421 eraseInstFromFunction(*Copy);
425       PointerReplacer PtrReplacer(*this);
426 PtrReplacer.replacePointer(AI, Cast);
435 return visitAllocSite(AI);
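Lines 366–435 are from visitAllocaInst. Once isOnlyCopiedFromConstantGlobal proves the alloca is initialized by a single memcpy from a constant global and is otherwise only read, the copy is erased and the users are pointed at the global (counted by NumGlobalCopies); the PointerReplacer path above handles the differing-address-space variant. A minimal same-address-space sketch, hand-written rather than actual pass output:

  @tbl = private unnamed_addr constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]

  declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1)

  define i32 @lookup(i64 %i) {
    %buf = alloca [4 x i32], align 4
    %dst = bitcast [4 x i32]* %buf to i8*
    call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst,
          i8* bitcast ([4 x i32]* @tbl to i8*), i64 16, i1 false)
    %p = getelementptr inbounds [4 x i32], [4 x i32]* %buf, i64 0, i64 %i
    %v = load i32, i32* %p, align 4
    ret i32 %v
  }

  ; Expected shape afterwards (sketch): the alloca, bitcast and memcpy disappear and
  ; the GEP/load index @tbl directly.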
453                                       const Twine &Suffix = "") {
455          "can't fold an atomic load to requested type");
462   Value *NewPtr = nullptr;
472   for (const auto &MDPair : MD) {
473 unsigned ID = MDPair.first;
521 "can't fold an atomic store of requested type");
532   for (const auto &MDPair : MD) {
533 unsigned ID = MDPair.first;
640   return SI && SI->getPointerOperand() != &LI &&
641          !SI->getPointerOperand()->isSwiftError();
648     auto *SI = cast<StoreInst>(*UI++);
664     if (auto* CI = dyn_cast<CastInst>(LI.user_back()))
665 if (CI->isNoopCast(DL))
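Lines 640–665 belong to the helpers behind combineLoadToOperationType: a load whose single user is a no-op cast is re-issued at the type the program actually uses, and combineLoadToNewType (whose metadata-copying loop appears at lines 453–473) carries the load's metadata across. A hedged sketch of the canonical case (hand-written, not verbatim output):

  define float @as_float(i32* %p) {
    %v = load i32, i32* %p, align 4
    %f = bitcast i32 %v to float
    ret float %f
  }

  ; After the combine (sketch): the pointer is cast instead of the value, and the
  ; load happens at the used type.
  ;   %p.cast = bitcast i32* %p to float*
  ;   %f = load float, float* %p.cast, align 4
  ;   ret float %f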
691   if (auto *ST = dyn_cast<StructType>(T)) {
693     auto NumElements = ST->getNumElements();
694 if (NumElements == 1) {
708 if (SL->hasPadding())
720     for (unsigned i = 0; i < NumElements; i++) {
721 Value *Indices[2] = {
732 L->setAAMetadata(AAMD);
740   if (auto *AT = dyn_cast<ArrayType>(T)) {
741 auto *ET = AT->getElementType();
742 auto NumElements = AT->getNumElements();
743 if (NumElements == 1) {
771 for (uint64_t i = 0; i < NumElements; i++) {
772 Value *Indices[2] = {
782 L->setAAMetadata(AAMD);
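Lines 691–782 are unpackLoadToAggregate: a load of a small, padding-free struct or array is split into element loads that are stitched back together with insertvalue (a one-element aggregate simply becomes a scalar load). A hand-written sketch for a two-field struct; names are illustrative:

  define { i32, i32 } @load_pair({ i32, i32 }* %p) {
    %v = load { i32, i32 }, { i32, i32 }* %p, align 4
    ret { i32, i32 } %v
  }

  ; Unpacked shape (sketch):
  ;   %p0 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %p, i64 0, i32 0
  ;   %e0 = load i32, i32* %p0, align 4
  ;   %p1 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %p, i64 0, i32 1
  ;   %e1 = load i32, i32* %p1, align 4
  ;   %v0 = insertvalue { i32, i32 } undef, i32 %e0, 0
  ;   %v  = insertvalue { i32, i32 } %v0, i32 %e1, 1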
809     if (!Visited.insert(P).second)
818     if (PHINode *PN = dyn_cast<PHINode>(P)) {
819       for (Value *IncValue : PN->incoming_values())
825       if (GA->isInterposable())
833     if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
834 if (!AI->getAllocatedType()->isSized())
850 if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
854 if (InitSize > MaxSize)
860   } while (!Worklist.empty());
891   if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
903 Idx = FirstNZIdx(GEPI);
912 if (!AllocTy || !AllocTy->isSized())
921 auto IsAllNonNegative = [&]() {
922     for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
949 template <typename T>
959 MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
973 Ptr = GEPI->getOperand(0);
974 return (isa<ConstantPointerNull>(Ptr) &&
980 const Value *GEPI0 = GEPI->getOperand(0);
981 if (isa<ConstantPointerNull>(GEPI0) &&
985   if (isa<UndefValue>(Op) ||
986       (isa<ConstantPointerNull>(Op) &&
1001       Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &AC, &DT);
1003   unsigned EffectiveLoadAlign =
1004       LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());
1006 if (KnownAlign > EffectiveLoadAlign)
1008 else if (LoadAlign == 0)
1013 Worklist.Add(NewGEPI);
1024   bool IsLoadCSE = false;
1030     return replaceInstUsesWith(
1031         LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
1069       LoadInst *V1 = Builder.CreateLoad(SI->getOperand(1),
1070                                         SI->getOperand(1)->getName()+".val");
1072                                         SI->getOperand(2)->getName()+".val");
1082       if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
1090       if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
1117   while (auto *IV = dyn_cast<InsertValueInst>(V)) {
1121     auto *W = E->getVectorOperand();
1127 if (!CI || IV->getNumIndices() != 1 || CI->getZExtValue() != *IV->idx_begin())
1129 V = IV->getAggregateOperand();
1131 if (!isa<UndefValue>(V) ||!U)
1134   auto *UT = cast<VectorType>(U->getType());
1138 if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
1141   if (auto *AT = dyn_cast<ArrayType>(VT)) {
1142     if (AT->getNumElements() != UT->getNumElements())
1145     auto *ST = cast<StructType>(VT);
1146     if (ST->getNumElements() != UT->getNumElements())
1148     for (const auto *EltT : ST->elements()) {
1149 if (EltT != UT->getElementType())
1189   if (auto *BC = dyn_cast<BitCastInst>(V)) {
1190 V = BC->getOperand(0);
1220   if (auto *ST = dyn_cast<StructType>(T)) {
1222     unsigned Count = ST->getNumElements();
1233 if (SL->hasPadding())
1244     AddrName += ".repack";
1248     for (unsigned i = 0; i < Count; i++) {
1249 Value *Indices[2] = {
1260 NS->setAAMetadata(AAMD);
1266   if (auto *AT = dyn_cast<ArrayType>(T)) {
1268 auto NumElements = AT->getNumElements();
1269 if (NumElements == 1) {
1292     AddrName += ".repack";
1298 for (uint64_t i = 0; i < NumElements; i++) {
1299 Value *Indices[2] = {
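Lines 1220–1299 are the mirror image, unpackStoreToAggregate: a store of a small padding-free struct or array becomes per-element stores through GEPs carrying the .repack suffix seen above. A sketch under the same assumptions as the load example (hand-written, names illustrative):

  define void @store_pair({ i32, i32 }* %p, i32 %a, i32 %b) {
    %t0 = insertvalue { i32, i32 } undef, i32 %a, 0
    %t1 = insertvalue { i32, i32 } %t0, i32 %b, 1
    store { i32, i32 } %t1, { i32, i32 }* %p, align 4
    ret void
  }

  ; Unpacked shape (sketch):
  ;   %p0 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %p, i64 0, i32 0
  ;   store i32 %a, i32* %p0, align 4
  ;   %p1 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %p, i64 0, i32 1
  ;   store i32 %b, i32* %p1, align 4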
1330   if (A == B) return true;
1337 if (isa<BinaryOperator>(A) ||
1340 isa<GetElementPtrInst>(A))
1342 if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
1362 if (!LI->getType()->isIntegerTy())
1367   if (!all_of(LI->users(), [LI, LoadAddr](User *U) {
1379   for (auto *UI : LI->users()) {
1380 auto *USI = cast<StoreInst>(UI);
1395 return eraseInstFromFunction(SI);
1399       Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &AC, &DT);
1401   unsigned EffectiveStoreAlign =
1402       StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());
1404 if (KnownAlign > EffectiveStoreAlign)
1406 else if (StoreAlign == 0)
1411 return eraseInstFromFunction(SI);
1414 return eraseInstFromFunction(SI);
1418 Worklist.Add(NewGEPI);
1429 if (isa<AllocaInst>(Ptr))
1430 return eraseInstFromFunction(SI);
1432   if (isa<AllocaInst>(GEP->getOperand(0))) {
1433     if (GEP->getOperand(0)->hasOneUse())
1434 return eraseInstFromFunction(SI);
1443   for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
1448 if (isa<DbgInfoIntrinsic>(BBI) ||
1449 (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
1454     if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
1460 eraseInstFromFunction(*PrevSI);
1469     if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
1472 return eraseInstFromFunction(SI);
1481 if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
1488 if (!isa<UndefValue>(Val)) {
1497 if (isa<UndefValue>(Val))
1498 return eraseInstFromFunction(SI);
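The scan at lines 1443–1498 walks at most six instructions backwards from a store: a previous store to the same address with nothing in between that reads, writes or may throw is erased, and a store of undef is deleted outright (both counted by NumDeadStore). For example:

  define void @overwrite(i32* %p) {
    store i32 1, i32* %p, align 4   ; dead: erased by the backward scan above
    store i32 2, i32* %p, align 4
    ret void
  }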
1506   } while (isa<DbgInfoIntrinsic>(BBI) ||
1507            (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
1509   if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
1510 if (BI->isUnconditional())
1511 mergeStoreIntoSuccessor(SI);
1521 bool InstCombiner::mergeStoreIntoSuccessor(StoreInst &SI) {
1523 "This code has not been audited for volatile or ordered store case.");
1533 if (*PredIter == StoreBB)
1539 if (StoreBB == DestBB || OtherBB == DestBB)
1545   if (!OtherBr || BBI == OtherBB->begin())
1551 if (OtherBr->isUnconditional()) {
1554 while (isa<DbgInfoIntrinsic>(BBI) ||
1555 (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
1556       if (BBI == OtherBB->begin())
1569 if (OtherBr->getSuccessor(0) != StoreBB &&
1570 OtherBr->getSuccessor(1) != StoreBB)
1578 if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
1586       if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
1587           BBI->mayWriteToMemory() || BBI == OtherBB->begin())
1609   MergedVal = InsertNewInstBefore(PN, DestBB->front());
1618 InsertNewInstBefore(NewSI, *BBI);
1630 eraseInstFromFunction(SI);
1631 eraseInstFromFunction(*OtherStore);
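mergeStoreIntoSuccessor (lines 1521–1631) handles the diamond where both predecessors of a block end by storing to the same address: the two simple, unordered stores are sunk into the common successor as a single store of a phi (the assert at line 1523 documents the volatile/ordered restriction). A hand-written sketch; the storemerge name mirrors typical instcombine output but is illustrative here:

  define void @sink(i1 %c, i32* %p) {
  entry:
    br i1 %c, label %then, label %else

  then:
    store i32 1, i32* %p, align 4
    br label %join

  else:
    store i32 2, i32* %p, align 4
    br label %join

  join:
    ret void
  }

  ; Merged shape (sketch):
  ; join:
  ;   %storemerge = phi i32 [ 1, %then ], [ 2, %else ]
  ;   store i32 %storemerge, i32* %p, align 4
  ;   ret void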