#define DEBUG_TYPE "tti"

static cl::opt<bool> EnableReduxCost("costmodel-reduxcost", cl::init(false),
                                     cl::Hidden,
                                     cl::desc("Recognize reduction patterns."));

TargetTransformInfo::TargetTransformInfo(const DataLayout &DL)
    : TTIImpl(new Model<NoTTIImpl>(NoTTIImpl(DL))) {}

TargetTransformInfo::TargetTransformInfo(TargetTransformInfo &&Arg)
    : TTIImpl(std::move(Arg.TTIImpl)) {}

TargetTransformInfo &TargetTransformInfo::operator=(TargetTransformInfo &&RHS) {
  TTIImpl = std::move(RHS.TTIImpl);
  return *this;
}
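// Sketch of typical client usage under the new pass manager (added
// commentary, not in the original file; `AM` and `F` are assumed to be the
// caller's FunctionAnalysisManager and Function):
//
//   TargetTransformInfo &TTI = AM.getResult<TargetIRAnalysis>(F);
//   if (TTI.isLegalMaskedLoad(VecTy))
//     ... // emit a masked load rather than a scalarized sequence
//
// The Model<NoTTIImpl> above is the type-erasing default used when no
// target-specific implementation is registered.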
int TargetTransformInfo::getOperationCost(unsigned Opcode, Type *Ty,
                                          Type *OpTy) const {
  int Cost = TTIImpl->getOperationCost(Opcode, Ty, OpTy);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getCallCost(FunctionType *FTy, int NumArgs) const {
  int Cost = TTIImpl->getCallCost(FTy, NumArgs);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getCallCost(const Function *F,
                                     ArrayRef<const Value *> Arguments) const {
  int Cost = TTIImpl->getCallCost(F, Arguments);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

unsigned TargetTransformInfo::getInliningThresholdMultiplier() const {
  return TTIImpl->getInliningThresholdMultiplier();
}

int TargetTransformInfo::getGEPCost(Type *PointeeType, const Value *Ptr,
                                    ArrayRef<const Value *> Operands) const {
  return TTIImpl->getGEPCost(PointeeType, Ptr, Operands);
}

int TargetTransformInfo::getExtCost(const Instruction *I,
                                    const Value *Src) const {
  return TTIImpl->getExtCost(I, Src);
}

int TargetTransformInfo::getIntrinsicCost(
    Intrinsic::ID IID, Type *RetTy, ArrayRef<const Value *> Arguments) const {
  int Cost = TTIImpl->getIntrinsicCost(IID, RetTy, Arguments);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

unsigned
TargetTransformInfo::getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                                      unsigned &JTSize) const {
  return TTIImpl->getEstimatedNumberOfCaseClusters(SI, JTSize);
}

int TargetTransformInfo::getUserCost(const User *U,
                                     ArrayRef<const Value *> Operands) const {
  int Cost = TTIImpl->getUserCost(U, Operands);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}
bool TargetTransformInfo::hasBranchDivergence() const {
  return TTIImpl->hasBranchDivergence();
}

bool TargetTransformInfo::isSourceOfDivergence(const Value *V) const {
  return TTIImpl->isSourceOfDivergence(V);
}

bool TargetTransformInfo::isAlwaysUniform(const Value *V) const {
  return TTIImpl->isAlwaysUniform(V);
}

unsigned TargetTransformInfo::getFlatAddressSpace() const {
  return TTIImpl->getFlatAddressSpace();
}

bool TargetTransformInfo::isLoweredToCall(const Function *F) const {
  return TTIImpl->isLoweredToCall(F);
}

void TargetTransformInfo::getUnrollingPreferences(
    Loop *L, ScalarEvolution &SE, UnrollingPreferences &UP) const {
  return TTIImpl->getUnrollingPreferences(L, SE, UP);
}

bool TargetTransformInfo::isLegalAddImmediate(int64_t Imm) const {
  return TTIImpl->isLegalAddImmediate(Imm);
}

bool TargetTransformInfo::isLegalICmpImmediate(int64_t Imm) const {
  return TTIImpl->isLegalICmpImmediate(Imm);
}

bool TargetTransformInfo::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                                int64_t BaseOffset,
                                                bool HasBaseReg, int64_t Scale,
                                                unsigned AddrSpace,
                                                Instruction *I) const {
  return TTIImpl->isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                                        Scale, AddrSpace, I);
}

bool TargetTransformInfo::isLSRCostLess(LSRCost &C1, LSRCost &C2) const {
  return TTIImpl->isLSRCostLess(C1, C2);
}

bool TargetTransformInfo::canMacroFuseCmp() const {
  return TTIImpl->canMacroFuseCmp();
}

bool TargetTransformInfo::shouldFavorPostInc() const {
  return TTIImpl->shouldFavorPostInc();
}

bool TargetTransformInfo::isLegalMaskedStore(Type *DataType) const {
  return TTIImpl->isLegalMaskedStore(DataType);
}

bool TargetTransformInfo::isLegalMaskedLoad(Type *DataType) const {
  return TTIImpl->isLegalMaskedLoad(DataType);
}

bool TargetTransformInfo::isLegalMaskedGather(Type *DataType) const {
  return TTIImpl->isLegalMaskedGather(DataType);
}

bool TargetTransformInfo::isLegalMaskedScatter(Type *DataType) const {
  return TTIImpl->isLegalMaskedScatter(DataType);
}

bool TargetTransformInfo::hasDivRemOp(Type *DataType, bool IsSigned) const {
  return TTIImpl->hasDivRemOp(DataType, IsSigned);
}
bool TargetTransformInfo::hasVolatileVariant(Instruction *I,
                                             unsigned AddrSpace) const {
  return TTIImpl->hasVolatileVariant(I, AddrSpace);
}

bool TargetTransformInfo::prefersVectorizedAddressing() const {
  return TTIImpl->prefersVectorizedAddressing();
}

int TargetTransformInfo::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                              int64_t BaseOffset,
                                              bool HasBaseReg, int64_t Scale,
                                              unsigned AddrSpace) const {
  int Cost = TTIImpl->getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg,
                                           Scale, AddrSpace);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

bool TargetTransformInfo::LSRWithInstrQueries() const {
  return TTIImpl->LSRWithInstrQueries();
}
bool TargetTransformInfo::isTruncateFree(Type *Ty1, Type *Ty2) const {
  return TTIImpl->isTruncateFree(Ty1, Ty2);
}

bool TargetTransformInfo::isProfitableToHoist(Instruction *I) const {
  return TTIImpl->isProfitableToHoist(I);
}

bool TargetTransformInfo::isTypeLegal(Type *Ty) const {
  return TTIImpl->isTypeLegal(Ty);
}

unsigned TargetTransformInfo::getJumpBufAlignment() const {
  return TTIImpl->getJumpBufAlignment();
}

unsigned TargetTransformInfo::getJumpBufSize() const {
  return TTIImpl->getJumpBufSize();
}

bool TargetTransformInfo::shouldBuildLookupTables() const {
  return TTIImpl->shouldBuildLookupTables();
}

bool TargetTransformInfo::shouldBuildLookupTablesForConstant(Constant *C) const {
  return TTIImpl->shouldBuildLookupTablesForConstant(C);
}

bool TargetTransformInfo::useColdCCForColdCall(Function &F) const {
  return TTIImpl->useColdCCForColdCall(F);
}

unsigned TargetTransformInfo::getScalarizationOverhead(Type *Ty, bool Insert,
                                                       bool Extract) const {
  return TTIImpl->getScalarizationOverhead(Ty, Insert, Extract);
}

unsigned TargetTransformInfo::getOperandsScalarizationOverhead(
    ArrayRef<const Value *> Args, unsigned VF) const {
  return TTIImpl->getOperandsScalarizationOverhead(Args, VF);
}

bool TargetTransformInfo::supportsEfficientVectorElementLoadStore() const {
  return TTIImpl->supportsEfficientVectorElementLoadStore();
}

bool TargetTransformInfo::enableAggressiveInterleaving(
    bool LoopHasReductions) const {
  return TTIImpl->enableAggressiveInterleaving(LoopHasReductions);
}

const TargetTransformInfo::MemCmpExpansionOptions *
TargetTransformInfo::enableMemCmpExpansion(bool IsZeroCmp) const {
  return TTIImpl->enableMemCmpExpansion(IsZeroCmp);
}

bool TargetTransformInfo::enableInterleavedAccessVectorization() const {
  return TTIImpl->enableInterleavedAccessVectorization();
}

bool TargetTransformInfo::enableMaskedInterleavedAccessVectorization() const {
  return TTIImpl->enableMaskedInterleavedAccessVectorization();
}

bool TargetTransformInfo::isFPVectorizationPotentiallyUnsafe() const {
  return TTIImpl->isFPVectorizationPotentiallyUnsafe();
}

bool TargetTransformInfo::allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                                         unsigned BitWidth,
                                                         unsigned AddressSpace,
                                                         unsigned Alignment,
                                                         bool *Fast) const {
  return TTIImpl->allowsMisalignedMemoryAccesses(Context, BitWidth,
                                                 AddressSpace, Alignment, Fast);
}

TargetTransformInfo::PopcntSupportKind
TargetTransformInfo::getPopcntSupport(unsigned IntTyWidthInBit) const {
  return TTIImpl->getPopcntSupport(IntTyWidthInBit);
}

bool TargetTransformInfo::haveFastSqrt(Type *Ty) const {
  return TTIImpl->haveFastSqrt(Ty);
}

bool TargetTransformInfo::isFCmpOrdCheaperThanFCmpZero(Type *Ty) const {
  return TTIImpl->isFCmpOrdCheaperThanFCmpZero(Ty);
}
int TargetTransformInfo::getFPOpCost(Type *Ty) const {
  int Cost = TTIImpl->getFPOpCost(Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
                                               const APInt &Imm,
                                               Type *Ty) const {
  int Cost = TTIImpl->getIntImmCodeSizeCost(Opcode, Idx, Imm, Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getIntImmCost(const APInt &Imm, Type *Ty) const {
  int Cost = TTIImpl->getIntImmCost(Imm, Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getIntImmCost(unsigned Opcode, unsigned Idx,
                                       const APInt &Imm, Type *Ty) const {
  int Cost = TTIImpl->getIntImmCost(Opcode, Idx, Imm, Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                                       const APInt &Imm, Type *Ty) const {
  int Cost = TTIImpl->getIntImmCost(IID, Idx, Imm, Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}
unsigned TargetTransformInfo::getNumberOfRegisters(bool Vector) const {
  return TTIImpl->getNumberOfRegisters(Vector);
}

unsigned TargetTransformInfo::getRegisterBitWidth(bool Vector) const {
  return TTIImpl->getRegisterBitWidth(Vector);
}

unsigned TargetTransformInfo::getMinVectorRegisterBitWidth() const {
  return TTIImpl->getMinVectorRegisterBitWidth();
}

bool TargetTransformInfo::shouldMaximizeVectorBandwidth(bool OptSize) const {
  return TTIImpl->shouldMaximizeVectorBandwidth(OptSize);
}

unsigned TargetTransformInfo::getMinimumVF(unsigned ElemWidth) const {
  return TTIImpl->getMinimumVF(ElemWidth);
}

bool TargetTransformInfo::shouldConsiderAddressTypePromotion(
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
  return TTIImpl->shouldConsiderAddressTypePromotion(
      I, AllowPromotionWithoutCommonHeader);
}

unsigned TargetTransformInfo::getCacheLineSize() const {
  return TTIImpl->getCacheLineSize();
}

llvm::Optional<unsigned>
TargetTransformInfo::getCacheSize(CacheLevel Level) const {
  return TTIImpl->getCacheSize(Level);
}

llvm::Optional<unsigned>
TargetTransformInfo::getCacheAssociativity(CacheLevel Level) const {
  return TTIImpl->getCacheAssociativity(Level);
}

unsigned TargetTransformInfo::getPrefetchDistance() const {
  return TTIImpl->getPrefetchDistance();
}

unsigned TargetTransformInfo::getMinPrefetchStride() const {
  return TTIImpl->getMinPrefetchStride();
}

unsigned TargetTransformInfo::getMaxPrefetchIterationsAhead() const {
  return TTIImpl->getMaxPrefetchIterationsAhead();
}
unsigned TargetTransformInfo::getMaxInterleaveFactor(unsigned VF) const {
  return TTIImpl->getMaxInterleaveFactor(VF);
}

TargetTransformInfo::OperandValueKind
TargetTransformInfo::getOperandInfo(Value *V, OperandValueProperties &OpProps) {
  OperandValueKind OpInfo = OK_AnyValue;
  OpProps = OP_None;

  if (auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().isPowerOf2())
      OpProps = OP_PowerOf2;
    return OK_UniformConstantValue;
  }

  // A broadcast shuffle creates a uniform value.
  if (auto *ShuffleInst = dyn_cast<ShuffleVectorInst>(V))
    if (ShuffleInst->isZeroEltSplat())
      OpInfo = OK_UniformValue;

  const Value *Splat = getSplatValue(V);

  // Check for a splat of a constant or for a non uniform vector of constants
  // and check if the constant(s) are all powers of two.
  if (isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) {
    OpInfo = OK_NonUniformConstantValue;
    if (Splat) {
      OpInfo = OK_UniformConstantValue;
      if (auto *CI = dyn_cast<ConstantInt>(Splat))
        if (CI->getValue().isPowerOf2())
          OpProps = OP_PowerOf2;
    } else if (auto *CDS = dyn_cast<ConstantDataSequential>(V)) {
      OpProps = OP_PowerOf2;
      for (unsigned I = 0, E = CDS->getNumElements(); I != E; ++I) {
        if (auto *CI = dyn_cast<ConstantInt>(CDS->getElementAsConstant(I)))
          if (CI->getValue().isPowerOf2())
            continue;
        OpProps = OP_None;
        break;
      }
    }
  } else if (Splat && (isa<Argument>(Splat) || isa<GlobalValue>(Splat))) {
    // A splat of an obviously uniform value (argument, global) is uniform.
    // This is not loop aware, so only the trivial cases are handled.
    OpInfo = OK_UniformValue;
  }

  return OpInfo;
}
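// Worked example for getOperandInfo (added commentary, derived from the
// classification logic above):
//
//   i32 8                        -> OK_UniformConstantValue, OP_PowerOf2
//   <4 x i32> <4, 4, 4, 4>       -> OK_UniformConstantValue, OP_PowerOf2
//   <4 x i32> <1, 2, 4, 8>       -> OK_NonUniformConstantValue, OP_PowerOf2
//   <4 x i32> <1, 2, 3, 4>       -> OK_NonUniformConstantValue, OP_None
//   splat of a function argument -> OK_UniformValue, OP_None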
int TargetTransformInfo::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
    OperandValueKind Opd2Info, OperandValueProperties Opd1PropInfo,
    OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args) const {
  int Cost = TTIImpl->getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                             Opd1PropInfo, Opd2PropInfo, Args);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getShuffleCost(ShuffleKind Kind, Type *Ty, int Index,
                                        Type *SubTp) const {
  int Cost = TTIImpl->getShuffleCost(Kind, Ty, Index, SubTp);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                          const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  int Cost = TTIImpl->getCastInstrCost(Opcode, Dst, Src, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                                  VectorType *VecTy,
                                                  unsigned Index) const {
  int Cost = TTIImpl->getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getCFInstrCost(unsigned Opcode) const {
  int Cost = TTIImpl->getCFInstrCost(Opcode);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                            Type *CondTy,
                                            const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  int Cost = TTIImpl->getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getVectorInstrCost(unsigned Opcode, Type *Val,
                                            unsigned Index) const {
  int Cost = TTIImpl->getVectorInstrCost(Opcode, Val, Index);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getMemoryOpCost(unsigned Opcode, Type *Src,
                                         unsigned Alignment,
                                         unsigned AddressSpace,
                                         const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  int Cost = TTIImpl->getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                               unsigned Alignment,
                                               unsigned AddressSpace) const {
  int Cost =
      TTIImpl->getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                                Value *Ptr, bool VariableMask,
                                                unsigned Alignment) const {
  int Cost = TTIImpl->getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                             Alignment);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    unsigned Alignment, unsigned AddressSpace, bool UseMaskForCond,
    bool UseMaskForGaps) const {
  int Cost = TTIImpl->getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                                 Alignment, AddressSpace,
                                                 UseMaskForCond,
                                                 UseMaskForGaps);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getIntrinsicInstrCost(
    Intrinsic::ID ID, Type *RetTy, ArrayRef<Type *> Tys, FastMathFlags FMF,
    unsigned ScalarizationCostPassed) const {
  int Cost = TTIImpl->getIntrinsicInstrCost(ID, RetTy, Tys, FMF,
                                            ScalarizationCostPassed);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                               ArrayRef<Value *> Args,
                                               FastMathFlags FMF,
                                               unsigned VF) const {
  int Cost = TTIImpl->getIntrinsicInstrCost(ID, RetTy, Args, FMF, VF);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getCallInstrCost(Function *F, Type *RetTy,
                                          ArrayRef<Type *> Tys) const {
  int Cost = TTIImpl->getCallInstrCost(F, RetTy, Tys);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}
unsigned TargetTransformInfo::getNumberOfParts(Type *Tp) const {
  return TTIImpl->getNumberOfParts(Tp);
}

int TargetTransformInfo::getAddressComputationCost(Type *Tp,
                                                   ScalarEvolution *SE,
                                                   const SCEV *Ptr) const {
  int Cost = TTIImpl->getAddressComputationCost(Tp, SE, Ptr);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getArithmeticReductionCost(unsigned Opcode, Type *Ty,
                                                    bool IsPairwiseForm) const {
  int Cost = TTIImpl->getArithmeticReductionCost(Opcode, Ty, IsPairwiseForm);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getMinMaxReductionCost(Type *Ty, Type *CondTy,
                                                bool IsPairwiseForm,
                                                bool IsUnsigned) const {
  int Cost =
      TTIImpl->getMinMaxReductionCost(Ty, CondTy, IsPairwiseForm, IsUnsigned);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

unsigned
TargetTransformInfo::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const {
  return TTIImpl->getCostOfKeepingLiveOverCall(Tys);
}

bool TargetTransformInfo::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                             MemIntrinsicInfo &Info) const {
  return TTIImpl->getTgtMemIntrinsic(Inst, Info);
}

unsigned TargetTransformInfo::getAtomicMemIntrinsicMaxElementSize() const {
  return TTIImpl->getAtomicMemIntrinsicMaxElementSize();
}

Value *TargetTransformInfo::getOrCreateResultFromMemIntrinsic(
    IntrinsicInst *Inst, Type *ExpectedType) const {
  return TTIImpl->getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
}

Type *TargetTransformInfo::getMemcpyLoopLoweringType(LLVMContext &Context,
                                                     Value *Length,
                                                     unsigned SrcAlign,
                                                     unsigned DestAlign) const {
  return TTIImpl->getMemcpyLoopLoweringType(Context, Length, SrcAlign,
                                            DestAlign);
}

void TargetTransformInfo::getMemcpyLoopResidualLoweringType(
    SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
    unsigned RemainingBytes, unsigned SrcAlign, unsigned DestAlign) const {
  TTIImpl->getMemcpyLoopResidualLoweringType(OpsOut, Context, RemainingBytes,
                                             SrcAlign, DestAlign);
}
bool TargetTransformInfo::areInlineCompatible(const Function *Caller,
                                              const Function *Callee) const {
  return TTIImpl->areInlineCompatible(Caller, Callee);
}

bool TargetTransformInfo::areFunctionArgsABICompatible(
    const Function *Caller, const Function *Callee,
    SmallPtrSetImpl<Argument *> &Args) const {
  return TTIImpl->areFunctionArgsABICompatible(Caller, Callee, Args);
}

bool TargetTransformInfo::isIndexedLoadLegal(MemIndexedMode Mode,
                                             Type *Ty) const {
  return TTIImpl->isIndexedLoadLegal(Mode, Ty);
}

bool TargetTransformInfo::isIndexedStoreLegal(MemIndexedMode Mode,
                                              Type *Ty) const {
  return TTIImpl->isIndexedStoreLegal(Mode, Ty);
}

unsigned TargetTransformInfo::getLoadStoreVecRegBitWidth(unsigned AS) const {
  return TTIImpl->getLoadStoreVecRegBitWidth(AS);
}

bool TargetTransformInfo::isLegalToVectorizeLoad(LoadInst *LI) const {
  return TTIImpl->isLegalToVectorizeLoad(LI);
}

bool TargetTransformInfo::isLegalToVectorizeStore(StoreInst *SI) const {
  return TTIImpl->isLegalToVectorizeStore(SI);
}

bool TargetTransformInfo::isLegalToVectorizeLoadChain(
    unsigned ChainSizeInBytes, unsigned Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
                                              AddrSpace);
}

bool TargetTransformInfo::isLegalToVectorizeStoreChain(
    unsigned ChainSizeInBytes, unsigned Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
                                               AddrSpace);
}

unsigned TargetTransformInfo::getLoadVectorFactor(unsigned VF,
                                                  unsigned LoadSize,
                                                  unsigned ChainSizeInBytes,
                                                  VectorType *VecTy) const {
  return TTIImpl->getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
}

unsigned TargetTransformInfo::getStoreVectorFactor(unsigned VF,
                                                   unsigned StoreSize,
                                                   unsigned ChainSizeInBytes,
                                                   VectorType *VecTy) const {
  return TTIImpl->getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
}

bool TargetTransformInfo::useReductionIntrinsic(unsigned Opcode, Type *Ty,
                                                ReductionFlags Flags) const {
  return TTIImpl->useReductionIntrinsic(Opcode, Ty, Flags);
}

bool TargetTransformInfo::shouldExpandReduction(const IntrinsicInst *II) const {
  return TTIImpl->shouldExpandReduction(II);
}

int TargetTransformInfo::getInstructionLatency(const Instruction *I) const {
  return TTIImpl->getInstructionLatency(I);
}
static bool matchPairwiseShuffleMask(ShuffleVectorInst *SI, bool IsLeft,
                                     unsigned Level) {
  // We don't need a shuffle if we just want to have element 0 in position 0
  // of the vector.
  if (!SI && Level == 0 && IsLeft)
    return true;
  else if (!SI)
    return false;

  SmallVector<int, 32> Mask(SI->getType()->getVectorNumElements(), -1);

  // Build a mask of 0, 2, ... (left) or 1, 3, ... (right) depending on
  // whether we look at the left or right side.
  for (unsigned i = 0, e = (1 << Level), val = !IsLeft; i != e; ++i, val += 2)
    Mask[i] = val;

  SmallVector<int, 16> ActualMask = SI->getShuffleMask();
  return Mask == ActualMask;
}
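// Added commentary: for a 4-element vector the masks this accepts are
// (u = undef, i.e. -1 in the expected mask):
//
//   Level 0, left:  <0, u, u, u>    Level 0, right: <1, u, u, u>
//   Level 1, left:  <0, 2, u, u>    Level 1, right: <1, 3, u, u>
//
// i.e. the left/right shuffles of a pairwise step select the even/odd lanes
// of the previous level, and all trailing lanes must be undef.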
namespace {
/// Kind of the reduction data.
enum ReductionKind {
  RK_None,           /// Not a reduction.
  RK_Arithmetic,     /// Binary reduction data.
  RK_MinMax,         /// Min/max reduction data.
  RK_UnsignedMinMax, /// Unsigned min/max reduction data.
};

/// Contains opcode + LHS/RHS parts of the reduction operations.
struct ReductionData {
  ReductionData() = delete;
  ReductionData(ReductionKind Kind, unsigned Opcode, Value *LHS, Value *RHS)
      : Opcode(Opcode), LHS(LHS), RHS(RHS), Kind(Kind) {
    assert(Kind != RK_None && "expected binary or min/max reduction only.");
  }
  unsigned Opcode = 0;
  Value *LHS = nullptr;
  Value *RHS = nullptr;
  ReductionKind Kind = RK_None;
  bool hasSameData(ReductionData &RD) const {
    return Kind == RD.Kind && Opcode == RD.Opcode;
  }
};
} // namespace

static Optional<ReductionData> getReductionData(Instruction *I) {
  Value *L, *R;
  if (m_BinOp(m_Value(L), m_Value(R)).match(I))
    return ReductionData(RK_Arithmetic, I->getOpcode(), L, R);
  if (auto *SI = dyn_cast<SelectInst>(I)) {
    if (m_SMin(m_Value(L), m_Value(R)).match(SI) ||
        m_SMax(m_Value(L), m_Value(R)).match(SI) ||
        m_OrdFMin(m_Value(L), m_Value(R)).match(SI) ||
        m_OrdFMax(m_Value(L), m_Value(R)).match(SI) ||
        m_UnordFMin(m_Value(L), m_Value(R)).match(SI) ||
        m_UnordFMax(m_Value(L), m_Value(R)).match(SI)) {
      auto *CI = cast<CmpInst>(SI->getCondition());
      return ReductionData(RK_MinMax, CI->getOpcode(), L, R);
    }
    if (m_UMin(m_Value(L), m_Value(R)).match(SI) ||
        m_UMax(m_Value(L), m_Value(R)).match(SI)) {
      auto *CI = cast<CmpInst>(SI->getCondition());
      return ReductionData(RK_UnsignedMinMax, CI->getOpcode(), L, R);
    }
  }
  return llvm::None;
}
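// Added example: an RK_MinMax candidate matched by m_SMax above looks like
//
//   %cmp = icmp sgt <4 x i32> %a, %b
//   %sel = select <4 x i1> %cmp, <4 x i32> %a, <4 x i32> %b
//
// The recorded Opcode is the compare's (Instruction::ICmp), and LHS/RHS are
// the select arms. RK_UnsignedMinMax is the same shape with an unsigned
// predicate.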
static bool matchPairwiseReductionAtLevel(Instruction *I, unsigned Level,
                                          unsigned NumLevels) {
  // ... (checks that I is a matched reduction operation at this level and
  // that its operands LS/RS are matching left/right pairwise shuffles,
  // elided in this excerpt) ...

  // On level 0 we can omit one shufflevector instruction.
  if (!Level && !RS && !LS)
    return false;

  Value *NextLevelOp = nullptr;
  if (NextLevelOpR && NextLevelOpL) {
    // If we have two instructions as the next-level operands, they must be
    // the same instruction.
    if (NextLevelOpL != NextLevelOpR)
      return false;

    NextLevelOp = NextLevelOpL;
  } else if (Level == 0 && (NextLevelOpR || NextLevelOpL)) {
    // On the first level the reduction operand may feed directly into one
    // side, since one shufflevector can be omitted.
    if (NextLevelOpL && NextLevelOpL != RD->RHS)
      return false;
    else if (NextLevelOpR && NextLevelOpR != RD->LHS)
      return false;

    NextLevelOp = NextLevelOpL ? RD->RHS : RD->LHS;
  } else
    return false;

  // Check that the next level's operation exists and matches the current one.
  if (Level + 1 != NumLevels) {
    Optional<ReductionData> NextLevelRD =
        getReductionData(cast<Instruction>(NextLevelOp));
    if (!NextLevelRD || !RD->hasSameData(*NextLevelRD))
      return false;
  }

  // ... (shuffle-mask checks for this level elided) ...

  if (++Level == NumLevels)
    return true;

  // Match next level.
  return matchPairwiseReductionAtLevel(cast<Instruction>(NextLevelOp), Level,
                                       NumLevels);
}
static ReductionKind matchPairwiseReduction(const ExtractElementInst *ReduxRoot,
                                            unsigned &Opcode, Type *&Ty) {
  if (!EnableReduxCost)
    return RK_None;

  // ... (checks that ReduxRoot extracts element 0 of a matched reduction
  // rooted at RdxStart, elided in this excerpt) ...

  Type *VecTy = RdxStart->getType();
  unsigned NumVecElems = VecTy->getVectorNumElements();
  if (!isPowerOf2_32(NumVecElems))
    return RK_None;

  if (!matchPairwiseReductionAtLevel(RdxStart, 0, Log2_32(NumVecElems)))
    return RK_None;

  Opcode = RD->Opcode;
  Ty = VecTy;
  return RD->Kind;
}

static std::pair<Value *, ShuffleVectorInst *>
getShuffleAndOtherOprd(Value *L, Value *R) {
  ShuffleVectorInst *S = nullptr;

  if ((S = dyn_cast<ShuffleVectorInst>(L)))
    return std::make_pair(R, S);

  S = dyn_cast<ShuffleVectorInst>(R);
  return std::make_pair(L, S);
}

static ReductionKind
matchVectorSplittingReduction(const ExtractElementInst *ReduxRoot,
                              unsigned &Opcode, Type *&Ty) {
  if (!EnableReduxCost)
    return RK_None;

  // ... (checks that ReduxRoot extracts element 0 of a matched reduction
  // rooted at RdxStart, and that the vector length NumVecElems is a power of
  // two, elided in this excerpt) ...

  // Match one shuffle/reduction pair at a time, walking upward from the
  // final extractelement and halving the live lanes at each step.
  unsigned MaskStart = 1;
  Instruction *RdxOp = RdxStart;
  SmallVector<int, 32> ShuffleMask(NumVecElems, 0);
  unsigned NumVecElemsRemain = NumVecElems;
  while (NumVecElemsRemain - 1) {
    // Check for the right reduction operation.
    if (!RdxOp)
      return RK_None;
    Optional<ReductionData> RDLevel = getReductionData(RdxOp);
    if (!RDLevel || !RDLevel->hasSameData(*RD))
      return RK_None;

    Value *NextRdxOp;
    ShuffleVectorInst *Shuffle;
    std::tie(NextRdxOp, Shuffle) =
        getShuffleAndOtherOprd(RDLevel->LHS, RDLevel->RHS);

    // Check that the reduction operation and the shuffle use the same value.
    if (Shuffle == nullptr)
      return RK_None;
    if (Shuffle->getOperand(0) != NextRdxOp)
      return RK_None;

    // Check that the shuffle mask matches.
    for (unsigned j = 0; j != MaskStart; ++j)
      ShuffleMask[j] = MaskStart + j;
    // Fill the rest of the mask with -1 for undef.
    std::fill(&ShuffleMask[MaskStart], ShuffleMask.end(), -1);

    SmallVector<int, 16> Mask = Shuffle->getShuffleMask();
    if (ShuffleMask != Mask)
      return RK_None;

    RdxOp = dyn_cast<Instruction>(NextRdxOp);
    NumVecElemsRemain /= 2;
    MaskStart *= 2;
  }

  Opcode = RD->Opcode;
  Ty = VecTy;
  return RD->Kind;
}
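// Added commentary: the sequence being matched (for a 4-wide fadd reduction)
// looks like this in IR, and is checked bottom-up from the final
// extractelement:
//
//   %rdx.shuf = shufflevector <4 x float> %rdx, <4 x float> undef,
//                             <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
//   %bin.rdx = fadd <4 x float> %rdx, %rdx.shuf
//   %rdx.shuf7 = shufflevector <4 x float> %bin.rdx, <4 x float> undef,
//                   <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
//   %bin.rdx8 = fadd <4 x float> %bin.rdx, %rdx.shuf7
//   %r = extractelement <4 x float> %bin.rdx8, i32 0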
int TargetTransformInfo::getInstructionThroughput(const Instruction *I) const {
  switch (I->getOpcode()) {
  case Instruction::GetElementPtr:
    return getUserCost(I);

  case Instruction::Ret:
  case Instruction::PHI:
  case Instruction::Br: {
    return getCFInstrCost(I->getOpcode());
  }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    TargetTransformInfo::OperandValueKind Op1VK, Op2VK;
    TargetTransformInfo::OperandValueProperties Op1VP, Op2VP;
    Op1VK = getOperandInfo(I->getOperand(0), Op1VP);
    Op2VK = getOperandInfo(I->getOperand(1), Op2VP);
    SmallVector<const Value *, 2> Operands(I->operand_values());
    return getArithmeticInstrCost(I->getOpcode(), I->getType(), Op1VK, Op2VK,
                                  Op1VP, Op2VP, Operands);
  }
  case Instruction::Select: {
    const SelectInst *SI = cast<SelectInst>(I);
    Type *CondTy = SI->getCondition()->getType();
    return getCmpSelInstrCost(I->getOpcode(), I->getType(), CondTy, I);
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    Type *ValTy = I->getOperand(0)->getType();
    return getCmpSelInstrCost(I->getOpcode(), ValTy, I->getType(), I);
  }
  case Instruction::Store: {
    const StoreInst *SI = cast<StoreInst>(I);
    Type *ValTy = SI->getValueOperand()->getType();
    return getMemoryOpCost(I->getOpcode(), ValTy, SI->getAlignment(),
                           SI->getPointerAddressSpace(), I);
  }
  case Instruction::Load: {
    const LoadInst *LI = cast<LoadInst>(I);
    return getMemoryOpCost(I->getOpcode(), I->getType(), LI->getAlignment(),
                           LI->getPointerAddressSpace(), I);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    return getCastInstrCost(I->getOpcode(), I->getType(), SrcTy, I);
  }
  case Instruction::ExtractElement: {
    const ExtractElementInst *EEI = cast<ExtractElementInst>(I);
    ConstantInt *CI = dyn_cast<ConstantInt>(EEI->getOperand(1));
    unsigned Idx = -1;
    if (CI)
      Idx = CI->getZExtValue();

    // Try to match a reduction sequence (series of shufflevector and vector
    // adds followed by an extractelement).
    unsigned ReduxOpCode;
    Type *ReduxType;

    switch (matchVectorSplittingReduction(EEI, ReduxOpCode, ReduxType)) {
    case RK_Arithmetic:
      return getArithmeticReductionCost(ReduxOpCode, ReduxType,
                                        /*IsPairwiseForm=*/false);
    case RK_MinMax:
      return getMinMaxReductionCost(
          ReduxType, CmpInst::makeCmpResultType(ReduxType),
          /*IsPairwiseForm=*/false, /*IsUnsigned=*/false);
    case RK_UnsignedMinMax:
      return getMinMaxReductionCost(
          ReduxType, CmpInst::makeCmpResultType(ReduxType),
          /*IsPairwiseForm=*/false, /*IsUnsigned=*/true);
    case RK_None:
      break;
    }

    switch (matchPairwiseReduction(EEI, ReduxOpCode, ReduxType)) {
    case RK_Arithmetic:
      return getArithmeticReductionCost(ReduxOpCode, ReduxType,
                                        /*IsPairwiseForm=*/true);
    case RK_MinMax:
      return getMinMaxReductionCost(
          ReduxType, CmpInst::makeCmpResultType(ReduxType),
          /*IsPairwiseForm=*/true, /*IsUnsigned=*/false);
    case RK_UnsignedMinMax:
      return getMinMaxReductionCost(
          ReduxType, CmpInst::makeCmpResultType(ReduxType),
          /*IsPairwiseForm=*/true, /*IsUnsigned=*/true);
    case RK_None:
      break;
    }

    return getVectorInstrCost(I->getOpcode(), EEI->getOperand(0)->getType(),
                              Idx);
  }
  case Instruction::InsertElement: {
    const InsertElementInst *IE = cast<InsertElementInst>(I);
    ConstantInt *CI = dyn_cast<ConstantInt>(IE->getOperand(2));
    unsigned Idx = -1;
    if (CI)
      Idx = CI->getZExtValue();
    return getVectorInstrCost(I->getOpcode(), IE->getType(), Idx);
  }
  case Instruction::ShuffleVector: {
    const ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(I);
    Type *Ty = Shuffle->getType();
    Type *SrcTy = Shuffle->getOperand(0)->getType();

    // TODO: Identify and add costs for insert subvector, etc.
    int SubIndex;
    if (Shuffle->isExtractSubvectorMask(SubIndex))
      return TTIImpl->getShuffleCost(SK_ExtractSubvector, SrcTy, SubIndex, Ty);

    if (Shuffle->changesLength())
      return -1;

    if (Shuffle->isIdentity())
      return 0;

    if (Shuffle->isReverse())
      return TTIImpl->getShuffleCost(SK_Reverse, Ty, 0, nullptr);

    if (Shuffle->isSelect())
      return TTIImpl->getShuffleCost(SK_Select, Ty, 0, nullptr);

    if (Shuffle->isTranspose())
      return TTIImpl->getShuffleCost(SK_Transpose, Ty, 0, nullptr);

    if (Shuffle->isZeroEltSplat())
      return TTIImpl->getShuffleCost(SK_Broadcast, Ty, 0, nullptr);

    return TTIImpl->getShuffleCost(SK_PermuteSingleSrc, Ty, 0, nullptr);
  }
  case Instruction::Call:
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      SmallVector<Value *, 4> Args(II->arg_operands());

      FastMathFlags FMF;
      if (auto *FPMO = dyn_cast<FPMathOperator>(II))
        FMF = FPMO->getFastMathFlags();

      return getIntrinsicInstrCost(II->getIntrinsicID(), II->getType(), Args,
                                   FMF);
    }
    return -1;
  default:
    // We don't have any information on this instruction.
    return -1;
  }
}
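// Added note: this helper backs the public query
// TargetTransformInfo::getInstructionCost(I, TCK_RecipThroughput), which
// dispatches to getInstructionThroughput, getInstructionLatency, or
// getUserCost depending on the requested TargetCostKind.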
TargetIRAnalysis::TargetIRAnalysis(
    std::function<Result(const Function &)> TTICallback)
    : TTICallback(std::move(TTICallback)) {}

TargetIRAnalysis::Result TargetIRAnalysis::run(const Function &F,
                                               FunctionAnalysisManager &) {
  return TTICallback(F);
}

// Register the basic pass.
INITIALIZE_PASS(TargetTransformInfoWrapperPass, "tti",
                "Target Transform Information", false, true)
char TargetTransformInfoWrapperPass::ID = 0;

void TargetTransformInfoWrapperPass::anchor() {}

TargetTransformInfo &TargetTransformInfoWrapperPass::getTTI(const Function &F) {
  FunctionAnalysisManager DummyFAM;
  TTI = TIRA.run(F, DummyFAM);
  return *TTI;
}
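// Sketch of legacy pass-manager usage (added commentary, not in the original
// file): inside runOnFunction a pass fetches the per-function TTI through the
// wrapper,
//
//   TargetTransformInfo &TTI =
//       getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
//
// getTTI() reruns the TargetIRAnalysis callback with a throwaway analysis
// manager, as shown above, so the result is recomputed per function queried.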