#ifndef LLVM_LIB_TARGET_X86_X86ISELLOWERING_H
#define LLVM_LIB_TARGET_X86_X86ISELLOWERING_H

class X86TargetMachine;
  /// Returns true if the given offset can be fit into the displacement field
  /// of the instruction.
  bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
                                    bool hasSymbolicDisplacement = true);

  /// Determines whether the callee is required to pop its own arguments.
  /// Callee pop is necessary to support tail calls.
  bool isCalleePop(CallingConv::ID CallingConv, bool is64Bit, bool IsVarArg,
                   bool GuaranteeTCO);
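  // Illustrative usage sketch (not part of the original header): callers such
  // as the X86 address-mode folding code typically guard a candidate
  // displacement with a check along these lines before folding it into an
  // instruction's 32-bit displacement field:
  //
  //   if (X86::isOffsetSuitableForCodeModel(Offset, M,
  //                                         /*hasSymbolicDisplacement=*/true))
  //     // safe to fold Offset into the addressing mode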
    unsigned getJumpTableEncoding() const override;

    bool useSoftFloat() const override;
    const MCExpr *
    LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                              const MachineBasicBlock *MBB, unsigned JTI,
                              MCContext &Ctx) const override;
    unsigned getByValTypeAlignment(Type *Ty,
                                   const DataLayout &DL) const override;
    EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                            bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                            MachineFunction &MF) const override;
    bool isSafeMemOpType(MVT VT) const override;

    bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, unsigned Align,
                                        bool *Fast) const override;
    void LowerOperationWrapper(SDNode *N, SmallVectorImpl<SDValue> &Results,
                               SelectionDAG &DAG) const override;

    bool isDesirableToCombineBuildVectorToShuffleTruncate(
        ArrayRef<int> ShuffleMask, EVT SrcVT, EVT TruncVT) const override;
    bool isTypeDesirableForOp(unsigned Opc, EVT VT) const override;

    bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const override;

    const char *getTargetNodeName(unsigned Opcode) const override;

    bool isCheapToSpeculateCttz() const override;

    bool isCheapToSpeculateCtlz() const override;

    bool isCtlzFast() const override;

    bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

    bool hasAndNotCompare(SDValue Y) const override;

    bool hasAndNot(SDValue Y) const override;

    bool preferShiftsToClearExtremeBits(SDValue Y) const override;
    /// Should we transform the IR-optimal check for whether the given
    /// truncation down into KeptBits would be truncating or not.
    bool shouldTransformSignedTruncationCheck(EVT XVT,
                                              unsigned KeptBits) const override {
      // For vectors, we have no preference.
      if (XVT.isVector())
        return false;

      auto VTIsOk = [](EVT VT) -> bool {
        return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
               VT == MVT::i64;
      };

      // KeptBitsVT may be byte/word/dword, which is what MOVSX supports;
      // XVT will be larger than KeptBitsVT.
      MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
      return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
    }
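    // Illustrative note (not from the original header): for the pattern
    //   %t = trunc i32 %x to i8
    //   %s = sext i8 %t to i32
    //   %c = icmp eq i32 %s, %x
    // KeptBits is 8 and XVT is i32; both satisfy VTIsOk, so x86 asks the
    // combiner to use the sign-extend-and-compare form (which maps onto
    // MOVSX + CMP) rather than the add-and-unsigned-compare form.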
    bool shouldSplatInsEltVarIndex(EVT VT) const override;

    MVT hasFastEqualityCompare(unsigned NumBits) const override;
    EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                           EVT VT) const override;
    bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
                                      TargetLoweringOpt &TLO) const override;
    void computeKnownBitsForTargetNode(const SDValue Op,
                                       KnownBits &Known,
                                       const APInt &DemandedElts,
                                       const SelectionDAG &DAG,
                                       unsigned Depth = 0) const override;

    unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                             const APInt &DemandedElts,
                                             const SelectionDAG &DAG,
                                             unsigned Depth) const override;

    bool SimplifyDemandedVectorEltsForTargetNode(SDValue Op,
                                                 const APInt &DemandedElts,
                                                 APInt &KnownUndef,
                                                 APInt &KnownZero,
                                                 TargetLoweringOpt &TLO,
                                                 unsigned Depth) const override;

    bool SimplifyDemandedBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedBits,
                                           const APInt &DemandedElts,
                                           KnownBits &Known,
                                           TargetLoweringOpt &TLO,
                                           unsigned Depth) const override;
    bool ExpandInlineAsm(CallInst *CI) const override;
    ConstraintWeight
      getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                     const char *constraint) const override;
    const char *LowerXConstraint(EVT ConstraintVT) const override;
    void LowerAsmOperandForConstraint(SDValue Op,
                                      std::string &Constraint,
                                      std::vector<SDValue> &Ops,
                                      SelectionDAG &DAG) const override;
    unsigned
    getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
      if (ConstraintCode == "i")
        return InlineAsm::Constraint_i;
      else if (ConstraintCode == "o")
        return InlineAsm::Constraint_o;
      else if (ConstraintCode == "v")
        return InlineAsm::Constraint_v;
      else if (ConstraintCode == "X")
        return InlineAsm::Constraint_X;
      return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
    }
    std::pair<unsigned, const TargetRegisterClass *>
    getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                 StringRef Constraint, MVT VT) const override;

    /// Return true if the addressing mode represented by AM is legal for this
    /// target, for a load/store of the specified type.
    bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                               Type *Ty, unsigned AS,
                               Instruction *I = nullptr) const override;
    bool isLegalICmpImmediate(int64_t Imm) const override;

    bool isLegalAddImmediate(int64_t Imm) const override;

    bool isLegalStoreImmediate(int64_t Imm) const override;
    /// Return the cost of the scaling factor used in the addressing mode
    /// represented by AM for this target, for a load/store of the specified
    /// type.
    int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS) const override;
    bool isVectorShiftByScalarCheap(Type *Ty) const override;

    bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
    bool isTruncateFree(EVT VT1, EVT VT2) const override;

    bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;

    bool isZExtFree(Type *Ty1, Type *Ty2) const override;
    bool isZExtFree(EVT VT1, EVT VT2) const override;
    bool isZExtFree(SDValue Val, EVT VT2) const override;

    bool isVectorLoadExtDesirable(SDValue) const override;

    bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;

    bool isNarrowingProfitable(EVT VT1, EVT VT2) const override;
    bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                            MachineFunction &MF,
                            unsigned Intrinsic) const override;
    bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;

    bool areJTsAllowed(const Function *Fn) const override;
    /// If true, then instruction selection should seek to shrink the FP
    /// constant of the specified type to a smaller type in order to save
    /// space and / or reduce runtime.
    bool ShouldShrinkFPConstant(EVT VT) const override {
      // Don't shrink FP constants when SSE2 is available, since cvtss2sd is
      // more expensive than a plain load; long double (f80) constants are
      // still worth shrinking because fldt is very slow.
      return !X86ScalarSSEf64 || VT == MVT::f80;
    }
    bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                               EVT NewVT) const override;
    /// Return true if the specified scalar FP type is computed in an SSE
    /// register, not on the X87 floating point stack.
    bool isScalarFPTypeInSSEReg(EVT VT) const {
      return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2
             (VT == MVT::f32 && X86ScalarSSEf32);   // f32 is when SSE1
    }
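    // Illustrative note (not from the original header): on a subtarget with
    // SSE2, both isScalarFPTypeInSSEReg(MVT::f64) and (MVT::f32) return true,
    // while MVT::f80 is always handled on the X87 stack and returns false.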
    bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                           Type *Ty) const override;

    bool reduceSelectOfFPConstantLoads(bool IsFPSetCC) const override;

    bool convertSelectOfConstantsToMath(EVT VT) const override;

    bool decomposeMulByConstant(EVT VT, SDValue C) const override;

    bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
                                  bool IsSigned) const override;

    bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                 unsigned Index) const override;

    bool shouldScalarizeBinop(SDValue) const override;
    /// Return true if it is expected to be cheaper to do a store of a non-zero
    /// vector constant with the given size and type for the address space than
    /// to store the individual scalar element constants.
    bool storeOfVectorConstantIsCheap(EVT MemVT, unsigned NumElem,
                                      unsigned AddrSpace) const override {
      return true;
    }
    bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT) const override;
    unsigned getRegisterByName(const char* RegName, EVT VT,
                               SelectionDAG &DAG) const override;
    /// If a physical register, this returns the register that receives the
    /// exception address on entry to an EH pad.
    unsigned
    getExceptionPointerRegister(const Constant *PersonalityFn) const override;

    /// If a physical register, this returns the register that receives the
    /// exception typeid on entry to a landing pad.
    unsigned
    getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
    virtual bool needsFixedCatchObjects() const override;
    bool useLoadStackGuardNode() const override;
    bool useStackGuardXorFP() const override;
    void insertSSPDeclarations(Module &M) const override;
    Value *getSDagStackGuard(const Module &M) const override;
    Value *getSSPStackGuardCheck(const Module &M) const override;
    SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
                                const SDLoc &DL) const override;
    bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;
    MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
                                      EVT VT) const override;

    unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                           CallingConv::ID CC,
                                           EVT VT) const override;
    bool supportSwiftError() const override;
    /// Lower interleaved load(s) into target specific instructions/intrinsics.
    bool lowerInterleavedLoad(LoadInst *LI,
                              ArrayRef<ShuffleVectorInst *> Shuffles,
                              ArrayRef<unsigned> Indices,
                              unsigned Factor) const override;

    /// Lower interleaved store(s) into target specific instructions/intrinsics.
    bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                               unsigned Factor) const override;
    std::pair<const TargetRegisterClass *, uint8_t>
    findRepresentativeClass(const TargetRegisterInfo *TRI,
                            MVT VT) const override;
    /// Select between SSE or x87 floating point ops.
    /// When SSE is available, use it for f32 operations.
    /// When SSE2 is available, use it for f64 operations.
    bool X86ScalarSSEf32;
    bool X86ScalarSSEf64;

    /// A list of legal FP immediates.
    std::vector<APFloat> LegalFPImmediates;

    /// Indicate that this x86 target can instruction
    /// select the specified FP immediate natively.
    void addLegalFPImmediate(const APFloat& Imm) {
      LegalFPImmediates.push_back(Imm);
    }
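    // Illustrative usage sketch (not part of the original header): the
    // lowering constructor registers immediates the selected FP unit can
    // materialize directly, e.g.
    //   addLegalFPImmediate(APFloat(+0.0)); // FLDZ / xorps
    //   addLegalFPImmediate(APFloat(+1.0)); // FLD1
    // and isFPImmLegal() then checks candidates against LegalFPImmediates.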
    /// Check whether the call is eligible for tail call optimization.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                           Type *RetTy,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG &DAG) const;
    SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
                                    SDValue Chain, bool IsTailCall,
                                    bool Is64Bit, int FPDiff,
                                    const SDLoc &dl) const;

    unsigned GetAlignedArgumentStackSize(unsigned StackSize,
                                         SelectionDAG &DAG) const;
    unsigned getAddressSpace(void) const;
    std::pair<SDValue, SDValue> FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
                                                bool isSigned,
                                                bool isReplace) const;
    unsigned getGlobalWrapperKind(const GlobalValue *GV = nullptr,
                                  const unsigned char OpFlags = 0) const;
    void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;
    bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;

    bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
    TargetLoweringBase::AtomicExpansionKind
    shouldExpandAtomicLoadInIR(LoadInst *SI) const override;
    bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
    TargetLoweringBase::AtomicExpansionKind
    shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

    LoadInst *
    lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const override;
    bool needsCmpXchgNb(Type *MemType) const;
    /// Utility function to emit the xmm reg save portion of va_start.
    MachineBasicBlock *
    EmitVAStartSaveXMMRegsWithCustomInserter(MachineInstr &BInstr,
                                             MachineBasicBlock *BB) const;
    /// Use rsqrt* to speed up sqrt calculations.
    SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                            int &RefinementSteps, bool &UseOneConstNR,
                            bool Reciprocal) const override;

    /// Use rcp* to speed up fdiv calculations.
    SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                             int &RefinementSteps) const override;
    unsigned combineRepeatedFPDivisors() const override;
    X86StoreSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl,
                   SDVTList VTs, EVT MemVT, MachineMemOperand *MMO)
        : MemSDNode(Opcode, Order, dl, VTs, MemVT, MMO) {}

    X86MaskedStoreSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl,
                         SDVTList VTs, EVT MemVT, MachineMemOperand *MMO)
        : MemSDNode(Opcode, Order, dl, VTs, MemVT, MMO) {}

    X86MaskedGatherScatterSDNode(unsigned Opc, unsigned Order,
                                 const DebugLoc &dl, SDVTList VTs, EVT MemVT,
                                 MachineMemOperand *MMO)
        : MemSDNode(Opc, Order, dl, VTs, MemVT, MMO) {}
  /// Generate unpacklo/unpackhi shuffle mask.
  template <typename T = int>
  void createUnpackShuffleMask(MVT VT, SmallVectorImpl<T> &Mask, bool Lo,
                               bool Unary) {
    assert(Mask.empty() && "Expected an empty shuffle mask vector");
    int NumElts = VT.getVectorNumElements();
    int NumEltsInLane = 128 / VT.getScalarSizeInBits();
    for (int i = 0; i < NumElts; ++i) {
      unsigned LaneStart = (i / NumEltsInLane) * NumEltsInLane;
      int Pos = (i % NumEltsInLane) / 2 + LaneStart;
      Pos += (Unary ? 0 : NumElts * (i % 2));
      Pos += (Lo ? 0 : NumEltsInLane / 2);
      Mask.push_back(Pos);
    }
  }
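  // Illustrative example (not part of the original header): for a 128-bit
  // v8i16 unpack-lo of two operands,
  //   SmallVector<int, 8> Mask;
  //   createUnpackShuffleMask(MVT::v8i16, Mask, /*Lo=*/true, /*Unary=*/false);
  // yields Mask == <0, 8, 1, 9, 2, 10, 3, 11>, i.e. the PUNPCKLWD pattern;
  // with Unary == true the second-operand indices fold back onto the first
  // operand, giving <0, 0, 1, 1, 2, 2, 3, 3>.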
  /// Helper function to scale a shuffle or target shuffle mask, replacing each
  /// mask index with the scaled sequential indices for an equivalent narrowed
  /// mask.
  template <typename T>
  void scaleShuffleMask(int Scale, ArrayRef<T> Mask,
                        SmallVectorImpl<T> &ScaledMask) {
    assert(0 < Scale && "Unexpected scaling factor");
    int NumElts = Mask.size();
    ScaledMask.assign(static_cast<size_t>(NumElts * Scale), -1);

    for (int i = 0; i != NumElts; ++i) {
      int M = Mask[i];

      // Repeat sentinel values in ScaledMask.
      if (M < 0) {
        for (int s = 0; s != Scale; ++s)
          ScaledMask[(Scale * i) + s] = M;
        continue;
      }

      // Scale mask entry and insert.
      for (int s = 0; s != Scale; ++s)
        ScaledMask[(Scale * i) + s] = (Scale * M) + s;
    }
  }
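  // Illustrative example (not part of the original header): with Scale == 2,
  //   Mask       = <0, -1, 3, 1>
  //   ScaledMask = <0, 1, -1, -1, 6, 7, 2, 3>
  // i.e. every defined index M expands to (2*M, 2*M+1) and every sentinel
  // value is simply repeated.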
#endif // LLVM_LIB_TARGET_X86_X86ISELLOWERING_H