| AArch64TTIImpl(const AArch64TargetMachine *TM, const Function &F) | llvm::AArch64TTIImpl | inline explicit |
| allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace, unsigned Alignment, bool *Fast) const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| TargetTransformInfoImplCRTPBase< AArch64TTIImpl >::allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace, unsigned Alignment, bool *Fast) | llvm::TargetTransformInfoImplBase | inline |
| areFunctionArgsABICompatible(const Function *Caller, const Function *Callee, SmallPtrSetImpl< Argument *> &Args) const | llvm::TargetTransformInfoImplBase | inline |
| areInlineCompatible(const Function *Caller, const Function *Callee) const | llvm::AArch64TTIImpl | |
| BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline explicit protected |
| canMacroFuseCmp() | llvm::TargetTransformInfoImplBase | inline |
| DL | llvm::TargetTransformInfoImplBase | protected |
| enableAggressiveInterleaving(bool LoopHasReductions) | llvm::TargetTransformInfoImplBase | inline |
| enableInterleavedAccessVectorization() | llvm::AArch64TTIImpl | inline |
| enableMaskedInterleavedAccessVectorization() | llvm::TargetTransformInfoImplBase | inline |
| enableMemCmpExpansion(bool IsZeroCmp) const | llvm::TargetTransformInfoImplBase | inline |
| getAddressComputationCost(Type *Ty, ScalarEvolution *SE, const SCEV *Ptr) | llvm::AArch64TTIImpl | |
| getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info=TTI::OK_AnyValue, TTI::OperandValueKind Opd2Info=TTI::OK_AnyValue, TTI::OperandValueProperties Opd1PropInfo=TTI::OP_None, TTI::OperandValueProperties Opd2PropInfo=TTI::OP_None, ArrayRef< const Value *> Args=ArrayRef< const Value *>()) | llvm::AArch64TTIImpl | |
| getArithmeticReductionCost(unsigned Opcode, Type *Ty, bool IsPairwiseForm) | llvm::AArch64TTIImpl | |
| getAtomicMemIntrinsicMaxElementSize() const | llvm::TargetTransformInfoImplBase | inline |
| getCacheAssociativity(TargetTransformInfo::CacheLevel Level) | llvm::TargetTransformInfoImplBase | inline |
| getCacheLineSize() | llvm::AArch64TTIImpl | |
| getCacheSize(TargetTransformInfo::CacheLevel Level) | llvm::TargetTransformInfoImplBase | inline |
| getCallCost(const Function *F, int NumArgs) | llvm::TargetTransformInfoImplCRTPBase< AArch64TTIImpl > | inline |
| getCallCost(const Function *F, ArrayRef< const Value * > Arguments) | llvm::TargetTransformInfoImplCRTPBase< AArch64TTIImpl > | inline |
| llvm::TargetTransformInfoImplBase::getCallCost(FunctionType *FTy, int NumArgs) | llvm::TargetTransformInfoImplBase | inline |
| getCallInstrCost(Function *F, Type *RetTy, ArrayRef< Type * > Tys) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, const Instruction *I=nullptr) | llvm::AArch64TTIImpl | |
| getCFInstrCost(unsigned Opcode) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, const Instruction *I=nullptr) | llvm::AArch64TTIImpl | |
| getConstantStrideStep(ScalarEvolution *SE, const SCEV *Ptr) | llvm::TargetTransformInfoImplBase | inline protected |
| getCostOfKeepingLiveOverCall(ArrayRef< Type *> Tys) | llvm::AArch64TTIImpl | |
| getDataLayout() const | llvm::TargetTransformInfoImplBase | inline |
| getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JumpTableSize) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| getExtCost(const Instruction *I, const Value *Src) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index) | llvm::AArch64TTIImpl | |
| getFlatAddressSpace() | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| getFPOpCost(Type *Ty) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| getGatherScatterOpCost(unsigned Opcode, Type *DataTy, Value *Ptr, bool VariableMask, unsigned Alignment) | llvm::TargetTransformInfoImplBase | inline |
| getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| getInliningThresholdMultiplier() | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| getInstructionLatency(const Instruction *I) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, unsigned Alignment, unsigned AddressSpace, bool UseMaskForCond=false, bool UseMaskForGaps=false) | llvm::AArch64TTIImpl | |
| getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty) | llvm::TargetTransformInfoImplBase | inline |
| getIntImmCost(int64_t Val) | llvm::AArch64TTIImpl | |
| getIntImmCost(const APInt &Imm, Type *Ty) | llvm::AArch64TTIImpl | |
| getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty) | llvm::AArch64TTIImpl | |
| getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm, Type *Ty) | llvm::AArch64TTIImpl | |
| getIntrinsicCost(Intrinsic::ID IID, Type *RetTy, ArrayRef< const Value * > Arguments) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| getIntrinsicCost(Intrinsic::ID IID, Type *RetTy, ArrayRef< Type * > ParamTys) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy, ArrayRef< Value * > Args, FastMathFlags FMF, unsigned VF=1) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy, ArrayRef< Type * > Tys, FastMathFlags FMF, unsigned ScalarizationCostPassed=std::numeric_limits< unsigned >::max()) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| getJumpBufAlignment() | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| getJumpBufSize() | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| getLoadStoreVecRegBitWidth(unsigned AddrSpace) const | llvm::TargetTransformInfoImplBase | inline |
| getLoadVectorFactor(unsigned VF, unsigned LoadSize, unsigned ChainSizeInBytes, VectorType *VecTy) const | llvm::TargetTransformInfoImplBase | inline |
| getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment, unsigned AddressSpace) | llvm::TargetTransformInfoImplBase | inline |
| getMaxInterleaveFactor(unsigned VF) | llvm::AArch64TTIImpl | |
| getMaxPrefetchIterationsAhead() | llvm::AArch64TTIImpl | |
| getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length, unsigned SrcAlign, unsigned DestAlign) const | llvm::TargetTransformInfoImplBase | inline |
| getMemcpyLoopResidualLoweringType(SmallVectorImpl< Type *> &OpsOut, LLVMContext &Context, unsigned RemainingBytes, unsigned SrcAlign, unsigned DestAlign) const | llvm::TargetTransformInfoImplBase | inline |
| getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment, unsigned AddressSpace, const Instruction *I=nullptr) | llvm::AArch64TTIImpl | |
| getMinimumVF(unsigned ElemWidth) const | llvm::TargetTransformInfoImplBase | inline |
| getMinMaxReductionCost(Type *Ty, Type *CondTy, bool IsPairwise, bool) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| getMinPrefetchStride() | llvm::AArch64TTIImpl | |
| getMinVectorRegisterBitWidth() | llvm::AArch64TTIImpl | inline |
| getNumberOfParts(Type *Tp) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| getNumberOfRegisters(bool Vector) | llvm::AArch64TTIImpl | inline |
| getOperandsScalarizationOverhead(ArrayRef< const Value * > Args, unsigned VF) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst, Type *ExpectedType) | llvm::AArch64TTIImpl | |
| getPopcntSupport(unsigned TyWidth) | llvm::AArch64TTIImpl | |
| getPrefetchDistance() | llvm::AArch64TTIImpl | |
| getRegisterBitWidth(bool Vector) const | llvm::AArch64TTIImpl | inline |
| getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| getScalarizationOverhead(Type *VecTy, ArrayRef< const Value * > Args) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index, Type *SubTp) | llvm::AArch64TTIImpl | |
| getStoreVectorFactor(unsigned VF, unsigned StoreSize, unsigned ChainSizeInBytes, VectorType *VecTy) const | llvm::TargetTransformInfoImplBase | inline |
| getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) | llvm::AArch64TTIImpl | |
| getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP) | llvm::AArch64TTIImpl | |
| getUserCost(const User *U, ArrayRef< const Value * > Operands) | llvm::TargetTransformInfoImplCRTPBase< AArch64TTIImpl > | inline |
| getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) | llvm::AArch64TTIImpl | |
| getVectorSplitCost() | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| hasBranchDivergence() | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| hasDivRemOp(Type *DataType, bool IsSigned) | llvm::TargetTransformInfoImplBase | inline |
| hasVolatileVariant(Instruction *I, unsigned AddrSpace) | llvm::TargetTransformInfoImplBase | inline |
| haveFastSqrt(Type *Ty) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| isAlwaysUniform(const Value *V) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| isConstantStridedAccessLessThan(ScalarEvolution *SE, const SCEV *Ptr, int64_t MergeDistance) | llvm::TargetTransformInfoImplBase | inline protected |
| isFCmpOrdCheaperThanFCmpZero(Type *Ty) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| isFPVectorizationPotentiallyUnsafe() | llvm::TargetTransformInfoImplBase | inline |
| isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty, const DataLayout &DL) const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty, const DataLayout &DL) const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| isLegalAddImmediate(int64_t imm) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace, Instruction *I=nullptr) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| isLegalICmpImmediate(int64_t imm) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| isLegalMaskedGather(Type *DataType) | llvm::TargetTransformInfoImplBase | inline |
| isLegalMaskedLoad(Type *DataType) | llvm::TargetTransformInfoImplBase | inline |
| isLegalMaskedScatter(Type *DataType) | llvm::TargetTransformInfoImplBase | inline |
| isLegalMaskedStore(Type *DataType) | llvm::TargetTransformInfoImplBase | inline |
| isLegalToVectorizeLoad(LoadInst *LI) const | llvm::TargetTransformInfoImplBase | inline |
| isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, unsigned Alignment, unsigned AddrSpace) const | llvm::TargetTransformInfoImplBase | inline |
| isLegalToVectorizeStore(StoreInst *SI) const | llvm::TargetTransformInfoImplBase | inline |
| isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, unsigned Alignment, unsigned AddrSpace) const | llvm::TargetTransformInfoImplBase | inline |
| isLoweredToCall(const Function *F) | llvm::TargetTransformInfoImplBase | inline |
| isLSRCostLess(TTI::LSRCost C1, TTI::LSRCost C2) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| TargetTransformInfoImplCRTPBase< AArch64TTIImpl >::isLSRCostLess(TTI::LSRCost &C1, TTI::LSRCost &C2) | llvm::TargetTransformInfoImplBase | inline |
| isProfitableToHoist(Instruction *I) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| isSourceOfDivergence(const Value *V) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| isStridedAccess(const SCEV *Ptr) | llvm::TargetTransformInfoImplBase | inline protected |
| isTruncateFree(Type *Ty1, Type *Ty2) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| isTypeLegal(Type *Ty) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| LSRWithInstrQueries() | llvm::TargetTransformInfoImplBase | inline |
| minRequiredElementSize(const Value *Val, bool &isSigned) | llvm::TargetTransformInfoImplBase | inline protected |
| prefersVectorizedAddressing() | llvm::TargetTransformInfoImplBase | inline |
| shouldBuildLookupTables() | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| shouldBuildLookupTablesForConstant(Constant *C) | llvm::TargetTransformInfoImplBase | inline |
| shouldConsiderAddressTypePromotion(const Instruction &I, bool &AllowPromotionWithoutCommonHeader) | llvm::AArch64TTIImpl | |
| shouldExpandReduction(const IntrinsicInst *II) const | llvm::AArch64TTIImpl | inline |
| shouldFavorPostInc() const | llvm::TargetTransformInfoImplBase | inline |
| shouldMaximizeVectorBandwidth(bool OptSize) const | llvm::TargetTransformInfoImplBase | inline |
| supportsEfficientVectorElementLoadStore() | llvm::TargetTransformInfoImplBase | inline |
| TargetTransformInfoImplBase(const DataLayout &DL) | llvm::TargetTransformInfoImplBase | inline explicit protected |
| TargetTransformInfoImplBase(const TargetTransformInfoImplBase &Arg) | llvm::TargetTransformInfoImplBase | inline |
| TargetTransformInfoImplBase(TargetTransformInfoImplBase &&Arg) | llvm::TargetTransformInfoImplBase | inline |
| TargetTransformInfoImplCRTPBase(const DataLayout &DL) | llvm::TargetTransformInfoImplCRTPBase< AArch64TTIImpl > | inline explicit protected |
| useAA() const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
| TargetTransformInfoImplCRTPBase< AArch64TTIImpl >::useAA() | llvm::TargetTransformInfoImplBase | inline |
| useColdCCForColdCall(Function &F) | llvm::TargetTransformInfoImplBase | inline |
| useReductionIntrinsic(unsigned Opcode, Type *Ty, TTI::ReductionFlags Flags) const | llvm::AArch64TTIImpl | |