#define DEBUG_TYPE "aarch64-lower"

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumShiftInserts, "Number of vector shift inserts");
STATISTIC(NumOptimizedImms, "Number of times immediates were optimized");

static cl::opt<bool>
EnableAArch64SlrGeneration("aarch64-shift-insert-generation", cl::Hidden,
                           cl::desc("Allow AArch64 SLI/SRI formation"),
                           cl::init(false));

static cl::opt<bool> EnableAArch64ELFLocalDynamicTLSGeneration(
    "aarch64-elf-ldtls-generation", cl::Hidden,
    cl::desc("Allow AArch64 Local Dynamic TLS code generation"),
    cl::init(false));

static cl::opt<bool>
EnableOptimizeLogicalImm("aarch64-enable-logical-imm", cl::Hidden,
                         cl::desc("Enable AArch64 logical imm instruction "
                                  "optimization"),
                         cl::init(true));

void AArch64TargetLowering::addTypeForNEON(MVT VT, MVT PromotedBitwiseVT) {
  for (unsigned Opcode :

void AArch64TargetLowering::addDRTypeForNEON(MVT VT) {
  addTypeForNEON(VT, MVT::v2i32);
}

void AArch64TargetLowering::addQRTypeForNEON(MVT VT) {
  addTypeForNEON(VT, MVT::v4i32);
}
static bool optimizeLogicalImm(SDValue Op, unsigned Size, uint64_t Imm,
                               const APInt &Demanded,
                               TargetLowering::TargetLoweringOpt &TLO,
                               unsigned NewOpc) {
  uint64_t OldImm = Imm, NewImm, Enc;
  uint64_t Mask = ((uint64_t)(-1LL) >> (64 - Size)), OrigMask = Mask;

  // Bail out early if the immediate is already all-zeros or all-ones.
  if (Imm == 0 || Imm == Mask ||

  unsigned EltSize = Size;

  // Set the undemanded bits so that adjacent runs of demanded ones merge,
  // which tends to produce an encodable logical immediate.
  uint64_t NonDemandedBits = ~DemandedBits;
  uint64_t InvertedImm = ~Imm & DemandedBits;
  uint64_t RotatedImm =
      ((InvertedImm << 1) | (InvertedImm >> (EltSize - 1) & 1)) &
      NonDemandedBits;
  uint64_t Sum = RotatedImm + NonDemandedBits;
  bool Carry = NonDemandedBits & ~Sum & (1ULL << (EltSize - 1));
  uint64_t Ones = (Sum + Carry) & NonDemandedBits;
  NewImm = (Imm | Ones) & Mask;

  uint64_t Hi = Imm >> EltSize, DemandedBitsHi = DemandedBits >> EltSize;
  if (((Imm ^ Hi) & (DemandedBits & DemandedBitsHi) & Mask) != 0)
    return false;
  DemandedBits |= DemandedBitsHi;

  // Replicate the element pattern back up to the full register width.
  while (EltSize < Size) {
    NewImm |= NewImm << EltSize;
    EltSize *= 2;
  }

  assert(((OldImm ^ NewImm) & Demanded.getZExtValue()) == 0 &&
         "demanded bits should never be altered");
  assert(OldImm != NewImm && "the new imm shouldn't be equal to the old imm");

  if (NewImm == 0 || NewImm == OrigMask) {
  assert((Size == 32 || Size == 64) &&
         "i32 or i64 is expected after legalization.");

  switch (Op.getOpcode()) {
  case ISD::AND:
    NewOpc = Size == 32 ? AArch64::ANDWri : AArch64::ANDXri;
    break;
  case ISD::OR:
    NewOpc = Size == 32 ? AArch64::ORRWri : AArch64::ORRXri;
    break;
  case ISD::XOR:
    NewOpc = Size == 32 ? AArch64::EORWri : AArch64::EORXri;
    break;
  }
EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();

unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

assert(BitWidth >= 8 && "Unexpected width!");

assert(BitWidth >= 16 && "Unexpected width!");

       "SEH does not use catchret!");

case AArch64::F128CSEL:

case TargetOpcode::STACKMAP:
case TargetOpcode::PATCHPOINT:
static bool isLegalArithImmed(uint64_t C) {
  // Matches AArch64DAGToDAGISel::SelectArithImmed().
  bool IsLegal = (C >> 12 == 0) || ((C & 0xFFFULL) == 0 && C >> 24 == 0);
  LLVM_DEBUG(dbgs() << "Is imm " << C
                    << " legal: " << (IsLegal ? "yes\n" : "no\n"));
  return IsLegal;
}
const bool FullFP16 =
    static_cast<const AArch64Subtarget &>(DAG.getSubtarget()).hasFullFP16();

if (isCMN(RHS, CC)) {
} else if (isCMN(LHS, CC)) {

unsigned Opcode = 0;
const bool FullFP16 =
    static_cast<const AArch64Subtarget &>(DAG.getSubtarget()).hasFullFP16();

return DAG.getNode(Opcode, DL, MVT_CC, LHS, RHS, NZCVOp, Condition, CCOp);
static bool canEmitConjunction(const SDValue Val, bool &CanNegate,
                               bool &MustBeFirst, bool WillNegate,
                               unsigned Depth = 0) {
  MustBeFirst = false;

  bool IsOR = Opcode == ISD::OR;

  if (MustBeFirstL && MustBeFirstR)
    return false;

  if (!CanNegateL && !CanNegateR)
    return false;

  // An AND of negated conditions cannot be expressed directly, so for an OR
  // both operands must be negatable.
  CanNegate = WillNegate && CanNegateL && CanNegateR;
  MustBeFirst = !CanNegate;

  MustBeFirst = MustBeFirstL || MustBeFirstR;

Predicate = ExtraCC;

bool IsOR = Opcode == ISD::OR;

assert(ValidL && "Valid conjunction/disjunction tree");
assert(ValidR && "Valid conjunction/disjunction tree");
assert(!MustBeFirstR && "Valid conjunction/disjunction tree");

bool NegateAfterAll;
  assert(CanNegateR && "at least one side must be negatable");
  assert(!MustBeFirstR && "invalid conjunction/disjunction tree");
  NegateAfterR = true;

  NegateR = CanNegateR;
  NegateAfterR = !CanNegateR;

  NegateAfterAll = !Negate;

  assert(Opcode == ISD::AND && "Valid conjunction/disjunction tree");
  assert(!Negate && "Valid conjunction/disjunction tree");

  NegateAfterR = false;
  NegateAfterAll = false;

bool DummyCanNegate;
bool DummyMustBeFirst;
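// --- Why negatability matters above: CCMP chains natively express a
// conjunction (AND) of conditions, and an OR is emitted by negating both
// operands and the final result via De Morgan. A minimal standalone check of
// the identity being relied on:
#include <cassert>

int main() {
  for (int a = 0; a <= 1; ++a)
    for (int b = 0; b <= 1; ++b)
      assert((a || b) == !(!a && !b)); // De Morgan, as used by the lowering
}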
auto isSupportedExtend = [&](SDValue V) {
  if (ConstantSDNode *MaskCst = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
    uint64_t Mask = MaskCst->getZExtValue();
    return (Mask == 0xFF || Mask == 0xFFFF || Mask == 0xFFFFFFFF);
  }

if (isSupportedExtend(Op))
  return 1;

uint64_t Shift = ShiftCst->getZExtValue();
return (Shift <= 4) ? 2 : 1;
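// --- Standalone sketch: an AND with one of those three masks is exactly a
// UXTB/UXTH/UXTW zero-extension, so it can ride along for free as an
// extended-register operand of a compare.
#include <cstdint>
#include <cstdio>

static const char *extendKind(uint64_t Mask) {
  if (Mask == 0xFF)       return "uxtb";
  if (Mask == 0xFFFF)     return "uxth";
  if (Mask == 0xFFFFFFFF) return "uxtw";
  return "not a free extend";
}

int main() { printf("%s\n", extendKind(0xFFFF)); } // prints "uxth"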
uint64_t C = RHSC->getZExtValue();

// If C is not a legal arithmetic immediate but a neighbouring constant is,
// nudge the condition code so the neighbour can be used instead.
if ((VT == MVT::i32 && C != 0x80000000 &&
     isLegalArithImmed((uint32_t)(C - 1))) ||
    (VT == MVT::i64 && C != 0x80000000ULL &&
     isLegalArithImmed(C - 1ULL))) {

if ((VT == MVT::i32 && C != INT32_MAX &&

if ((VT == MVT::i32 && C != UINT32_MAX &&

if (!isa<ConstantSDNode>(RHS) ||

if ((RHSC->getZExtValue() >> 16 == 0) && isa<LoadSDNode>(LHS) &&
    cast<LoadSDNode>(LHS)->getExtensionType() == ISD::ZEXTLOAD &&
    cast<LoadSDNode>(LHS)->getMemoryVT() == MVT::i16 &&
  int16_t ValueofRHS = cast<ConstantSDNode>(RHS)->getZExtValue();

AArch64cc = DAG.getConstant(AArch64CC, dl, MVT_CC);
static std::pair<SDValue, SDValue>
getAArch64XALUOOp(AArch64CC::CondCode &CC, SDValue Op, SelectionDAG &DAG) {
  assert((Op.getValueType() == MVT::i32 || Op.getValueType() == MVT::i64) &&
         "Unsupported value type");

                         UpperBits).getValue(1);

                         UpperBits).getValue(1);

  Value = DAG.getNode(Opc, DL, VTs, LHS, RHS);

  return std::make_pair(Value, Overflow);
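// --- Standalone sketch of the overflow-test pattern used above for MULO:
// widen, multiply, and compare the upper bits against the sign-extension of
// the low half; any disagreement means the narrow multiply overflowed.
#include <cassert>
#include <cstdint>

static bool smul32Overflows(int32_t a, int32_t b) {
  int64_t Wide = (int64_t)a * (int64_t)b;
  int32_t Lo = (int32_t)Wide;
  int64_t UpperBits = Wide >> 32;          // high half of the wide product
  return UpperBits != (int64_t)(Lo >> 31); // must match the low half's sign
}

int main() {
  assert(!smul32Overflows(46341, 46340)); // 2147441940 fits in i32
  assert(smul32Overflows(46341, 46341));  // 2147488281 does not
  assert(smul32Overflows(INT32_MIN, -1)); // classic corner case
}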
if (!CFVal || !CTVal)
  return Op;

if (CTVal->isAllOnesValue() && CFVal->isNullValue()) {

bool ExtraOp = false;

unsigned IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
unsigned Locality = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
unsigned IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();

bool IsStream = !Locality;

assert(Locality <= 3 && "Prefetch locality out-of-range");
// IR locality counts up towards "keep in cache" (3); PRFM's target-cache
// levels count the other way, so flip the value.
Locality = 3 - Locality;

unsigned PrfOp = (IsWrite << 4) |

return LowerF128Call(Op, DAG, LC);
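// --- Hedged standalone sketch of packing the PRFM operand. Only the
// (IsWrite << 4) term survives in the listing above, so the remaining field
// layout here (level, stream policy) is an illustrative assumption, not the
// verbatim encoding.
#include <cstdio>

static unsigned encodePrfOp(bool IsWrite, unsigned Locality, bool IsStream) {
  // Mirrors the 3 - Locality flip shown above; field positions are assumed.
  unsigned Level = IsStream ? 0 : 3 - Locality;
  return (IsWrite << 4) | (Level << 1) | (IsStream ? 1 : 0);
}

int main() { printf("%#x\n", encodePrfOp(false, 3, false)); }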
In = DAG.getNode(CastOpc, dl, CastVT, In);

return LowerF128Call(Op, DAG, LC);

Entry.IsSExt = false;
Entry.IsZExt = false;
Args.push_back(Entry);

    : RTLIB::SINCOS_STRET_F32;

std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
return CallResult.first;
switch (OrigSimpleTy) {

                                   unsigned ExtOpcode) {

unsigned HalfSize = EltSize / 2;

if (!isIntN(HalfSize, C->getSExtValue()))
  return false;

if (!isUIntN(HalfSize, C->getZExtValue()))
  return false;

unsigned EltSize = VT.getScalarSizeInBits() / 2;
unsigned NumElts = VT.getVectorNumElements();

for (unsigned i = 0; i != NumElts; ++i) {

       "unexpected type for custom-lowering ISD::MUL");

unsigned NewOpc = 0;

if (isN0SExt && isN1SExt)
  NewOpc = AArch64ISD::SMULL;
else if (isN0ZExt && isN1ZExt)
  NewOpc = AArch64ISD::UMULL;
else if (isN1SExt || isN1ZExt) {

       "unexpected types for extended operands to VMULL");
return DAG.getNode(NewOpc, DL, VT, Op0, Op1);

return DAG.getNode(N0->getOpcode(), DL, VT,
SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                       SelectionDAG &DAG) const {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
         "llvm.eh.recoverfp must take a function as the first argument");
  return IncomingFPOp;
assert(StoreNode && "Can only custom lower store nodes");

assert(VT.isVector() && "Can only custom lower vector store types");
case ISD::GlobalAddress:
  return LowerGlobalAddress(Op, DAG);
case ISD::GlobalTLSAddress:
  return LowerGlobalTLSAddress(Op, DAG);
case ISD::SETCC:
  return LowerSETCC(Op, DAG);
case ISD::BR_CC:
  return LowerBR_CC(Op, DAG);
case ISD::SELECT:
  return LowerSELECT(Op, DAG);
case ISD::SELECT_CC:
  return LowerSELECT_CC(Op, DAG);
case ISD::JumpTable:
  return LowerJumpTable(Op, DAG);
case ISD::BR_JT:
  return LowerBR_JT(Op, DAG);
case ISD::ConstantPool:
  return LowerConstantPool(Op, DAG);
case ISD::BlockAddress:
  return LowerBlockAddress(Op, DAG);
case ISD::VASTART:
  return LowerVASTART(Op, DAG);
case ISD::VACOPY:
  return LowerVACOPY(Op, DAG);
case ISD::VAARG:
  return LowerVAARG(Op, DAG);
case ISD::FADD:
  return LowerF128Call(Op, DAG, RTLIB::ADD_F128);
case ISD::FSUB:
  return LowerF128Call(Op, DAG, RTLIB::SUB_F128);
case ISD::FMUL:
  return LowerF128Call(Op, DAG, RTLIB::MUL_F128);
case ISD::FDIV:
  return LowerF128Call(Op, DAG, RTLIB::DIV_F128);
case ISD::FP_ROUND:
  return LowerFP_ROUND(Op, DAG);
case ISD::FP_EXTEND:
  return LowerFP_EXTEND(Op, DAG);
case ISD::FRAMEADDR:
  return LowerFRAMEADDR(Op, DAG);
case ISD::SPONENTRY:
  return LowerSPONENTRY(Op, DAG);
case ISD::RETURNADDR:
  return LowerRETURNADDR(Op, DAG);
case ISD::ADDROFRETURNADDR:
  return LowerADDROFRETURNADDR(Op, DAG);
case ISD::INSERT_VECTOR_ELT:
  return LowerINSERT_VECTOR_ELT(Op, DAG);
case ISD::EXTRACT_VECTOR_ELT:
  return LowerEXTRACT_VECTOR_ELT(Op, DAG);
case ISD::BUILD_VECTOR:
  return LowerBUILD_VECTOR(Op, DAG);
case ISD::VECTOR_SHUFFLE:
  return LowerVECTOR_SHUFFLE(Op, DAG);
case ISD::EXTRACT_SUBVECTOR:
  return LowerEXTRACT_SUBVECTOR(Op, DAG);
case ISD::SRA:
case ISD::SRL:
case ISD::SHL:
  return LowerVectorSRA_SRL_SHL(Op, DAG);
case ISD::SHL_PARTS:
  return LowerShiftLeftParts(Op, DAG);
case ISD::SRL_PARTS:
case ISD::SRA_PARTS:
  return LowerShiftRightParts(Op, DAG);
case ISD::CTPOP:
  return LowerCTPOP(Op, DAG);
case ISD::FCOPYSIGN:
  return LowerFCOPYSIGN(Op, DAG);
case ISD::AND:
  return LowerVectorAND(Op, DAG);
case ISD::OR:
  return LowerVectorOR(Op, DAG);
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP:
  return LowerINT_TO_FP(Op, DAG);
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT:
  return LowerFP_TO_INT(Op, DAG);
case ISD::FSINCOS:
  return LowerFSINCOS(Op, DAG);
case ISD::FLT_ROUNDS_:
  return LowerFLT_ROUNDS_(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN:
  return LowerINTRINSIC_WO_CHAIN(Op, DAG);
case ISD::STORE:
  return LowerSTORE(Op, DAG);
case ISD::VECREDUCE_ADD:
case ISD::VECREDUCE_SMAX:
case ISD::VECREDUCE_SMIN:
case ISD::VECREDUCE_UMAX:
case ISD::VECREDUCE_UMIN:
case ISD::VECREDUCE_FMAX:
case ISD::VECREDUCE_FMIN:
  return LowerVECREDUCE(Op, DAG);
case ISD::ATOMIC_LOAD_SUB:
  return LowerATOMIC_LOAD_SUB(Op, DAG);
case ISD::ATOMIC_LOAD_AND:
  return LowerATOMIC_LOAD_AND(Op, DAG);
case ISD::DYNAMIC_STACKALLOC:
  return LowerDYNAMIC_STACKALLOC(Op, DAG);
#include "AArch64GenCallingConv.inc"

CCAssignFn *AArch64TargetLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                     bool IsVarArg) const {
  switch (CC) {
  case CallingConv::WebKit_JS:
    return CC_AArch64_WebKit_JS;
  case CallingConv::GHC:
    return CC_AArch64_GHC;
  case CallingConv::C:
  case CallingConv::Fast:
    if (Subtarget->isTargetWindows() && IsVarArg)
      return CC_AArch64_Win64_VarArg;
    if (!Subtarget->isTargetDarwin())
      return CC_AArch64_AAPCS;
    return IsVarArg ? CC_AArch64_DarwinPCS_VarArg : CC_AArch64_DarwinPCS;
  case CallingConv::Win64:
    return IsVarArg ? CC_AArch64_Win64_VarArg : CC_AArch64_AAPCS;
  case CallingConv::AArch64_VectorCall:
    return CC_AArch64_AAPCS;
  }
}

CCAssignFn *
AArch64TargetLowering::CCAssignFnForReturn(CallingConv::ID CC) const {
  return CC == CallingConv::WebKit_JS ? RetCC_AArch64_WebKit_JS
                                      : RetCC_AArch64_AAPCS;
}
SDValue AArch64TargetLowering::LowerFormalArguments(

  unsigned NumArgs = Ins.size();

  unsigned CurArgIdx = 0;
  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT ValVT = Ins[i].VT;
    if (Ins[i].isOrigArg()) {
      std::advance(CurOrigArg, Ins[i].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[i].getOrigArgIndex();

    assert(!Res && "Call operand has unhandled type");

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

    if (Ins[i].Flags.isByVal()) {
      // ByVal arguments are frame-allocated: round the aggregate size up to
      // a whole number of 8-byte registers.
      int Size = Ins[i].Flags.getByValSize();
      unsigned NumRegs = (Size + 7) / 8;

      RC = &AArch64::GPR32RegClass;
      RC = &AArch64::GPR64RegClass;
      RC = &AArch64::FPR16RegClass;
      RC = &AArch64::FPR32RegClass;
      RC = &AArch64::FPR64RegClass;
      RC = &AArch64::FPR128RegClass;

      assert(RegVT == Ins[i].VT && "incorrect register location selected");

      // On big-endian targets, a small argument sits at the high end of its
      // 8-byte stack slot.
          !Ins[i].Flags.isInConsecutiveRegs())
        BEAlign = 8 - ArgSize;

      ArgValue = DAG.getExtLoad(ExtType, DL, VA.getLocVT(), Chain, FIN,

  if (isVarArg) {
    saveVarArgRegisters(CCInfo, DAG, DL, Chain);

    unsigned StackOffset = CCInfo.getNextStackOffset();
    // Varargs are always passed at 8-byte alignment.
    StackOffset = ((StackOffset + 7) & ~7);

  CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes,

  unsigned StackArgSize = CCInfo.getNextStackOffset();
  if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) {
    // The callee pops its stack area, which must stay 16-byte aligned.
    StackArgSize = alignTo(StackArgSize, 16);
void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
                                                SelectionDAG &DAG,
                                                const SDLoc &DL,
                                                SDValue &Chain) const {
  static const MCPhysReg GPRArgRegs[] = { AArch64::X0, AArch64::X1,
                                          AArch64::X2, AArch64::X3,
                                          AArch64::X4, AArch64::X5,
                                          AArch64::X6, AArch64::X7 };
  static const unsigned NumGPRArgRegs = array_lengthof(GPRArgRegs);

  unsigned GPRSaveSize = 8 * (NumGPRArgRegs - FirstVariadicGPR);
  if (GPRSaveSize != 0) {
    if (GPRSaveSize & 15)

    for (unsigned i = FirstVariadicGPR; i < NumGPRArgRegs; ++i) {
      unsigned VReg = MF.addLiveIn(GPRArgRegs[i], &AArch64::GPR64RegClass);

                                (i - FirstVariadicGPR) * 8)

  static const MCPhysReg FPRArgRegs[] = {
      AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
      AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7};
  static const unsigned NumFPRArgRegs = array_lengthof(FPRArgRegs);

  unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
  if (FPRSaveSize != 0) {
    for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) {
      unsigned VReg = MF.addLiveIn(FPRArgRegs[i], &AArch64::FPR128RegClass);

  if (!MemOps.empty()) {
SDValue AArch64TargetLowering::LowerCallResult(

  CCAssignFn *RetCC = CallConv == CallingConv::WebKit_JS
                          ? RetCC_AArch64_WebKit_JS
                          : RetCC_AArch64_AAPCS;

  for (unsigned i = 0; i != RVLocs.size(); ++i) {

    if (i == 0 && isThisReturn) {
           "unexpected return calling convention register assignment");
bool AArch64TargetLowering::isEligibleForTailCallOptimization(

  bool CCMatch = CallerCC == CalleeCC;

  // ByVal parameters hand the caller a pointer into the stack area we want to
  // reuse during a tail call, so they rule the optimization out.
  if (i->hasByValAttr())
    return false;

         "Unexpected variadic calling convention");

  if (isVarArg && !Outs.empty()) {

    CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);

    if (!ArgLoc.isRegLoc())
      return false;

  if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
    return false;

  CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
                                                   SelectionDAG &DAG,
                                                   MachineFrameInfo &MFI,
                                                   int ClobberedFI) const {
  int64_t FirstByte = MFI.getObjectOffset(ClobberedFI);
  int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;

  if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
    if (FI->getIndex() < 0) {
      int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
      int64_t InLastByte = InFirstByte;

      if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
          (FirstByte <= InFirstByte && InFirstByte <= LastByte))

bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC,
                                                   bool TailCallOpt) const {
AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,

  bool &IsTailCall = CLI.IsTailCall;
  bool IsVarArg = CLI.IsVarArg;

  bool IsThisReturn = false;

  bool IsSibCall = false;

  IsTailCall = isEligibleForTailCallOptimization(
      Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
  if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall())
    report_fatal_error("failed to perform tail call elimination on a call "
                       "site marked musttail");

  if (!TailCallOpt && IsTailCall)
    IsSibCall = true;

  if (IsVarArg) {
    unsigned NumArgs = Outs.size();

    for (unsigned i = 0; i != NumArgs; ++i) {
      MVT ArgVT = Outs[i].VT;

      assert(!Res && "Call operand has unhandled type");

    unsigned NumArgs = Outs.size();
    for (unsigned i = 0; i != NumArgs; ++i) {
      MVT ValVT = Outs[i].VT;
             CLI.getArgs()[Outs[i].OrigArgIndex].Ty,

      assert(!Res && "Call operand has unhandled type");

  if (IsTailCall && !IsSibCall) {
    // Since the callee pops the argument stack on return from a tail call,
    // the popped size must stay 16-byte aligned.
    NumBytes = alignTo(NumBytes, 16);

    // FPDiff is negative when the tail call needs more stack space than the
    // incoming argument area provides.
    FPDiff = NumReusableBytes - NumBytes;

    assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
if (IsVarArg && CLI.CS && CLI.CS.isMustTailCall()) {
  for (const auto &F : Forwards) {
    RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));

for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e;
     ++i, ++realArgIdx) {

  if (Outs[realArgIdx].ArgVT == MVT::i1) {

         "unexpected calling convention register assignment");
         "unexpected use of 'returned'");
  IsThisReturn = true;

  OpSize = (OpSize + 7) / 8;

    BEAlign = 8 - OpSize;

  int32_t Offset = LocMemOffset + BEAlign;

    Offset = Offset + FPDiff;

    Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI);

  if (Outs[i].Flags.isByVal()) {

        Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(),

if (!MemOpChains.empty())

for (auto &RegToPass : RegsToPass) {
                           RegToPass.second, InFlag);

if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
  auto GV = G->getGlobal();
} else if (Subtarget->isTargetCOFF() && GV->hasDLLImportStorageClass()) {
         "Windows is the only supported COFF target");
} else if (auto *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const char *Sym = S->getSymbol();
    const char *Sym = S->getSymbol();

if (IsTailCall && !IsSibCall) {

std::vector<SDValue> Ops;
Ops.push_back(Chain);
Ops.push_back(Callee);

for (auto &RegToPass : RegsToPass)
  Ops.push_back(DAG.getRegister(RegToPass.first,
                                RegToPass.second.getValueType()));

IsThisReturn = false;

assert(Mask && "Missing call preserved mask for calling convention");

Ops.push_back(InFlag);

uint64_t CalleePopBytes =
    DoesCalleeRestoreStack(CallConv, TailCallOpt) ? alignTo(NumBytes, 16) : 0;

return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
                       InVals, IsThisReturn,
                       IsThisReturn ? OutVals[0] : SDValue());
bool AArch64TargetLowering::CanLowerReturn(

  CCAssignFn *RetCC = CallConv == CallingConv::WebKit_JS
                          ? RetCC_AArch64_WebKit_JS
                          : RetCC_AArch64_AAPCS;

  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);

  CCAssignFn *RetCC = CallConv == CallingConv::WebKit_JS
                          ? RetCC_AArch64_WebKit_JS
                          : RetCC_AArch64_AAPCS;

  for (unsigned i = 0, realRVLocIdx = 0; i != RVLocs.size();
       ++i, ++realRVLocIdx) {

    if (Outs[i].ArgVT == MVT::i1) {

    if (AArch64::GPR64RegClass.contains(*I))
    else if (AArch64::FPR64RegClass.contains(*I))

SDValue AArch64TargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
                                             SelectionDAG &DAG,
                                             unsigned Flag) const {

SDValue AArch64TargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
                                             SelectionDAG &DAG,
                                             unsigned Flag) const {

SDValue AArch64TargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty,
                                             SelectionDAG &DAG,
                                             unsigned Flag) const {

SDValue AArch64TargetLowering::getTargetNode(BlockAddressSDNode *N, EVT Ty,
                                             SelectionDAG &DAG,
                                             unsigned Flag) const {
template <class NodeTy>
SDValue AArch64TargetLowering::getGOT(NodeTy *N, SelectionDAG &DAG,
                                      unsigned Flags) const {

template <class NodeTy>
SDValue AArch64TargetLowering::getAddrLarge(NodeTy *N, SelectionDAG &DAG,
                                            unsigned Flags) const {

template <class NodeTy>
SDValue AArch64TargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
                                       unsigned Flags) const {

template <class NodeTy>
SDValue AArch64TargetLowering::getAddrTiny(NodeTy *N, SelectionDAG &DAG,
                                           unsigned Flags) const {
  SDValue Sym = getTargetNode(N, Ty, DAG, Flags);
unsigned char OpFlags =
    Subtarget->ClassifyGlobalReference(GV, getTargetMachine());

       "unexpected offset in global node");

  return getGOT(GN, DAG, OpFlags);

  Result = getAddrLarge(GN, DAG, OpFlags);
  Result = getAddrTiny(GN, DAG, OpFlags);
  Result = getAddr(GN, DAG, OpFlags);
SDValue
AArch64TargetLowering::LowerDarwinGlobalTLSAddress(SDValue Op,
                                                   SelectionDAG &DAG) const {
  assert(Subtarget->isTargetDarwin() &&
         "This function expects a Darwin target");

  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();

SDValue AArch64TargetLowering::LowerELFTLSDescCallSeq(SDValue SymAddr,
                                                      const SDLoc &DL,
                                                      SelectionDAG &DAG) const {

SDValue
AArch64TargetLowering::LowerELFGlobalTLSAddress(SDValue Op,
                                                SelectionDAG &DAG) const {

  TPOff = LowerELFTLSDescCallSeq(SymAddr, DL, DAG);

  TPOff = LowerELFTLSDescCallSeq(SymAddr, DL, DAG);

SDValue
AArch64TargetLowering::LowerWindowsGlobalTLSAddress(SDValue Op,
                                                    SelectionDAG &DAG) const {

  if (Subtarget->isTargetDarwin())
    return LowerDarwinGlobalTLSAddress(Op, DAG);
  if (Subtarget->isTargetELF())
    return LowerELFGlobalTLSAddress(Op, DAG);
  if (Subtarget->isTargetWindows())
    return LowerWindowsGlobalTLSAddress(Op, DAG);
bool ProduceNonFlagSettingCondBr =
    !MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening);

if (RHSC && RHSC->getZExtValue() == 0 && ProduceNonFlagSettingCondBr) {
else if (SrcVT.bitsGT(VT))

auto setVecVal = [&](int Idx) {

  EltMask = 0x80000000ULL;
  setVecVal(AArch64::ssub);

  setVecVal(AArch64::dsub);

  EltMask = 0x8000ULL;
  setVecVal(AArch64::hsub);

       "Unexpected type for custom ctpop lowering");

unsigned EltSize = 8;

  return LowerVSETCC(Op, DAG);

       "Unexpected setcc expansion!");
} else if (CTVal && CFVal) {
  const int64_t TrueVal = CTVal->getSExtValue();
  const int64_t FalseVal = CFVal->getSExtValue();

  // When both constants are known, one can often be materialized from the
  // other: CSINV when TrueVal == ~FalseVal, CSNEG when TrueVal == -FalseVal.
  if (TrueVal == ~FalseVal) {
  } else if (TrueVal == -FalseVal) {

  const uint32_t TrueVal32 = CTVal->getZExtValue();
  const uint32_t FalseVal32 = CFVal->getZExtValue();

  // CSINC handles constants that differ by exactly one.
  if ((TrueVal32 == FalseVal32 + 1) || (TrueVal32 + 1 == FalseVal32)) {
    if (TrueVal32 > FalseVal32) {
  } else if ((TrueVal == FalseVal + 1) || (TrueVal + 1 == FalseVal)) {
    if (TrueVal > FalseVal) {

else if (CFVal && CFVal == RHSVal && AArch64CC == AArch64CC::NE)

assert(CTVal && CFVal && "Expected constant operands for CSNEG.");

return DAG.getNode(Opcode, dl, VT, TVal, FVal, CCVal, Cmp);

if (RHSVal && RHSVal->isZero()) {

    CFVal && CFVal->isZero() &&

return LowerSELECT_CC(CC, LHS, RHS, TVal, FVal, DL, DAG);

CC = cast<CondCodeSDNode>(CCVal->getOperand(2))->get();

return LowerSELECT_CC(CC, LHS, RHS, TVal, FVal, DL, DAG);
  return getAddrLarge(JT, DAG);
  return getAddrTiny(JT, DAG);
return getAddr(JT, DAG);

int JTI = cast<JumpTableSDNode>(JT.getNode())->getIndex();

  return getGOT(CP, DAG);
  return getAddrLarge(CP, DAG);
  return getAddrTiny(CP, DAG);
return getAddr(CP, DAG);

  return getAddrLarge(BA, DAG);
  return getAddrTiny(BA, DAG);
return getAddr(BA, DAG);
  return LowerWin64_VASTART(Op, DAG);
  return LowerDarwin_VASTART(Op, DAG);
  return LowerAAPCS_VASTART(Op, DAG);

// Darwin's va_list is a plain pointer (8 bytes); the AAPCS va_list is a
// 32-byte struct.
unsigned VaListSize = Subtarget->isTargetDarwin() ? 8 : 32;
const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

assert(Subtarget->isTargetDarwin() &&
       "automatic va_arg instruction only works on Darwin");

assert(((Align & (Align - 1)) == 0) && "Expected Align to be a power of 2");
bool NeedFPTrunc = false;

unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
unsigned AArch64TargetLowering::getRegisterByName(const char* RegName, EVT VT,
                                                  SelectionDAG &DAG) const {
  unsigned Reg = StringSwitch<unsigned>(RegName)
                     .Case("sp", AArch64::SP)
                     .Case("x1", AArch64::X1).Case("w1", AArch64::W1)
                     .Case("x2", AArch64::X2).Case("w2", AArch64::W2)
                     .Case("x3", AArch64::X3).Case("w3", AArch64::W3)
                     .Case("x4", AArch64::X4).Case("w4", AArch64::W4)
                     .Case("x5", AArch64::X5).Case("w5", AArch64::W5)
                     .Case("x6", AArch64::X6).Case("w6", AArch64::W6)
                     .Case("x7", AArch64::X7).Case("w7", AArch64::W7)
                     .Case("x18", AArch64::X18).Case("w18", AArch64::W18)
                     .Case("x20", AArch64::X20).Case("w20", AArch64::W20)
                     .Default(0);
  // Each named register is only usable here if it is reserved on this
  // subtarget.
  if (((Reg == AArch64::X1 || Reg == AArch64::W1) &&
      ((Reg == AArch64::X2 || Reg == AArch64::W2) &&
      ((Reg == AArch64::X3 || Reg == AArch64::W3) &&
      ((Reg == AArch64::X4 || Reg == AArch64::W4) &&
      ((Reg == AArch64::X5 || Reg == AArch64::W5) &&
      ((Reg == AArch64::X6 || Reg == AArch64::W6) &&
      ((Reg == AArch64::X7 || Reg == AArch64::W7) &&
      ((Reg == AArch64::X18 || Reg == AArch64::W18) &&
      ((Reg == AArch64::X20 || Reg == AArch64::W20) &&
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);

unsigned Reg = MF.addLiveIn(AArch64::LR, &AArch64::GPR64RegClass);

                 HiBitsForLo, CCVal, Cmp);

SDValue LoForBigShift = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);

                 LoForNormalShift, CCVal, Cmp);

SDValue HiForNormalShift = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);

    ? DAG.getNode(Opc, dl, VT, ShOpHi,

                 HiForNormalShift, CCVal, Cmp);

                 LoBitsForHi, CCVal, Cmp);

                 HiForNormalShift, CCVal, Cmp);

                 LoForNormalShift, CCVal, Cmp);
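// --- Standalone sketch of the shift-parts selection above: a 128-bit left
// shift built from two 64-bit halves picks between the "normal" (< 64) and
// "big" (>= 64) paths; the lowering does this branchlessly with CSEL.
#include <cassert>
#include <cstdint>

static void shl128(uint64_t Lo, uint64_t Hi, unsigned Amt, uint64_t &OutLo,
                   uint64_t &OutHi) {
  if (Amt == 0) {
    OutLo = Lo;
    OutHi = Hi;
  } else if (Amt < 64) {
    OutLo = Lo << Amt;
    OutHi = (Hi << Amt) | (Lo >> (64 - Amt)); // HiBitsForLo
  } else {                                    // big-shift path
    OutLo = 0;
    OutHi = Lo << (Amt - 64);                 // ExtraShAmt = Amt - 64
  }
}

int main() {
  uint64_t Lo, Hi;
  shl128(0x8000000000000001ULL, 0, 1, Lo, Hi);
  assert(Lo == 2 && Hi == 1); // top bit carried into the high half
  shl128(1, 0, 64, Lo, Hi);
  assert(Lo == 0 && Hi == 1); // whole low word moved up
}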
bool IsLegal = false;

                  << " imm value: " << ImmStrVal << "\n");

                  << " imm value: " << ImmStrVal << "\n");

if (ExtraSteps == TargetLoweringBase::ReciprocalEstimate::Unspecified)

return DAG.getNode(Opcode, SDLoc(Operand), VT, Operand);

SDValue
AArch64TargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
                                       int Enabled, int &ExtraSteps,
                                       bool &UseOneConst,
                                       bool Reciprocal) const {
  if (Enabled == ReciprocalEstimate::Enabled ||
      (Enabled == ReciprocalEstimate::Unspecified && Subtarget->useRSqrt()))

    // Refine the initial estimate with Newton-Raphson iterations.
    for (int i = ExtraSteps; i > 0; --i) {

                           VT, Eq, Operand, Estimate);

                                        int &ExtraSteps) const {

    // Refine the initial reciprocal estimate with Newton-Raphson iterations.
    for (int i = ExtraSteps; i > 0; --i) {
const char *AArch64TargetLowering::LowerXConstraint(EVT ConstraintVT) const {

AArch64TargetLowering::ConstraintType
AArch64TargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {

TargetLowering::ConstraintWeight
AArch64TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match, but allow it at the
  // lowest weight.
  if (!CallOperandVal)
    return CW_Default;

  switch (*constraint) {
std::pair<unsigned, const TargetRegisterClass *>
AArch64TargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      return std::make_pair(0U, &AArch64::GPR64commonRegClass);
      return std::make_pair(0U, &AArch64::GPR32commonRegClass);
    case 'w':
      return std::make_pair(0U, &AArch64::FPR16RegClass);
      return std::make_pair(0U, &AArch64::FPR32RegClass);
      return std::make_pair(0U, &AArch64::FPR64RegClass);
      return std::make_pair(0U, &AArch64::FPR128RegClass);
    // The instructions this constraint targets only take the bottom half of
    // the register file, so use FPR128_lo.
    case 'x':
      return std::make_pair(0U, &AArch64::FPR128_loRegClass);

  return std::make_pair(unsigned(AArch64::NZCV), &AArch64::CCRRegClass);

  std::pair<unsigned, const TargetRegisterClass *> Res;

  // "{v0}".."{v31}" name NEON registers: map them onto the FPR classes.
  if ((Size == 4 || Size == 5) && Constraint[0] == '{' &&
      tolower(Constraint[1]) == 'v' && Constraint[Size - 1] == '}') {
    if (!Failed && RegNo >= 0 && RegNo <= 31) {
        Res.first = AArch64::FPR64RegClass.getRegister(RegNo);
        Res.second = &AArch64::FPR64RegClass;
        Res.first = AArch64::FPR128RegClass.getRegister(RegNo);
        Res.second = &AArch64::FPR128RegClass;

  if (Res.second && !Subtarget->hasFPARMv8() &&
      !AArch64::GPR32allRegClass.hasSubClassEq(Res.second) &&
      !AArch64::GPR64allRegClass.hasSubClassEq(Res.second))
    return std::make_pair(0U, nullptr);
void AArch64TargetLowering::LowerAsmOperandForConstraint(
    SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
    SelectionDAG &DAG) const {
  // Currently only support length 1 constraints.
  if (Constraint.length() != 1)
    return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {

                                     GA->getValueType(0));
  } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op)) {
  } else if (ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) {

  switch (ConstraintLetter) {
  // 'I': a legal ADD immediate; 'J': a legal SUB immediate (negated ADD).
  case 'I':
    if (isUInt<12>(CVal) || isShiftedUInt<12, 12>(CVal))
      break;
    return;
  case 'J': {
    uint64_t NVal = -CVal;
    if (isUInt<12>(NVal) || isShiftedUInt<12, 12>(NVal)) {

  // A constant (or its bitwise NOT) placeable in a 16-bit MOVZ/MOVN window.
    if ((CVal & 0xFFFF) == CVal)
      break;
    if ((CVal & 0xFFFF0000ULL) == CVal)
      break;
    if ((NCVal & 0xFFFFULL) == NCVal)
      break;
    if ((NCVal & 0xFFFF0000ULL) == NCVal)
      break;

  // The 64-bit variant checks all four 16-bit windows.
    if ((CVal & 0xFFFFULL) == CVal)
      break;
    if ((CVal & 0xFFFF0000ULL) == CVal)
      break;
    if ((CVal & 0xFFFF00000000ULL) == CVal)
      break;
    if ((CVal & 0xFFFF000000000000ULL) == CVal)
      break;
    uint64_t NCVal = ~CVal;
    if ((NCVal & 0xFFFFULL) == NCVal)
      break;
    if ((NCVal & 0xFFFF0000ULL) == NCVal)
      break;
    if ((NCVal & 0xFFFF00000000ULL) == NCVal)
      break;
    if ((NCVal & 0xFFFF000000000000ULL) == NCVal)
      break;

  Ops.push_back(Result);
LLVM_DEBUG(dbgs() << "AArch64TargetLowering::ReconstructShuffle\n");

struct ShuffleSourceInfo {

  ShuffleSourceInfo(SDValue Vec)
      : Vec(Vec), MinElt(std::numeric_limits<unsigned>::max()), MaxElt(0),
        ShuffleVec(Vec), WindowBase(0), WindowScale(1) {}

for (unsigned i = 0; i < NumElts; ++i) {

  LLVM_DEBUG(
      dbgs() << "Reshuffle failed: "
                "a shuffle can only come from building a vector from "
                "various elements of other vectors, provided their "
                "indices are constant\n");

  unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();

if (Sources.size() > 2) {
  LLVM_DEBUG(
      dbgs() << "Reshuffle failed: currently only do something sane when at "
                "most two source vectors are involved\n");

for (auto &Source : Sources) {
  EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType();
  if (SrcEltTy.bitsLT(SmallestEltTy)) {
    SmallestEltTy = SrcEltTy;

unsigned ResMultiplier =

for (auto &Src : Sources) {
  EVT SrcVT = Src.ShuffleVec.getValueType();

                           DAG.getUNDEF(Src.ShuffleVec.getValueType()));

  if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
    LLVM_DEBUG(
        dbgs() << "Reshuffle failed: span too large for a VEXT to cope\n");

  if (Src.MinElt >= NumSrcElts) {

    Src.WindowBase = -NumSrcElts;
  } else if (Src.MaxElt < NumSrcElts) {

    Src.WindowBase = -Src.MinElt;

for (auto &Src : Sources) {

  if (SrcEltTy == SmallestEltTy)
    continue;

  Src.WindowBase *= Src.WindowScale;

LLVM_DEBUG(assert(Src.ShuffleVec.getValueType() == ShuffleVT););

  int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue();

  int LanesDefined = BitsDefined / BitsPerShuffleLane;

  int *LaneMask = &Mask[i * ResMultiplier];

  int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
  ExtractBase += NumElts * (Src - Sources.begin());
  for (int j = 0; j < LanesDefined; ++j)
    LaneMask[j] = ExtractBase + j;

  LLVM_DEBUG(dbgs() << "Reshuffle failed: illegal shuffle mask\n");

for (unsigned i = 0; i < Sources.size(); ++i)
  ShuffleOps[i] = Sources[i].ShuffleVec;

                                       ShuffleOps[1], Mask);

LLVM_DEBUG(dbgs() << "Reshuffle, creating node: "; V.dump(););
unsigned ExpectedElt = Imm;
for (unsigned i = 1; i < NumElts; ++i) {
  // Increment the expected index. If it wraps around, follow it back to
  // index zero and keep going.
  ++ExpectedElt;
  if (ExpectedElt == NumElts)
    ExpectedElt = 0;

  if (ExpectedElt != static_cast<unsigned>(M[i]))
    return false;
}

const int *FirstRealElt = find_if(M, [](int Elt) { return Elt >= 0; });

APInt ExpectedElt = APInt(MaskBits, *FirstRealElt + 1);

// The following shuffle indices must be the successive elements after the
// first real element.
const int *FirstWrongElt = std::find_if(FirstRealElt + 1, M.end(),
    [&](int Elt) { return Elt != ExpectedElt++ && Elt != -1; });
if (FirstWrongElt != M.end())
  return false;
assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
       "Only possible block sizes for REV are: 16, 32, 64");

unsigned BlockElts = M[0] + 1;
// If the first shuffle index is UNDEF, be optimistic.
if (M[0] < 0)
  BlockElts = BlockSize / EltSz;

if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
  return false;

for (unsigned i = 0; i < NumElts; ++i) {
  if (M[i] < 0)
    continue; // ignore UNDEF indices
  if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
    return false;
}
// isZIPMask
WhichResult = (M[0] == 0 ? 0 : 1);
unsigned Idx = WhichResult * NumElts / 2;
for (unsigned i = 0; i != NumElts; i += 2) {
  if ((M[i] >= 0 && (unsigned)M[i] != Idx) ||
      (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx + NumElts))
    return false;
  Idx += 1;
}

// isUZPMask
WhichResult = (M[0] == 0 ? 0 : 1);
for (unsigned i = 0; i != NumElts; ++i) {
  if (M[i] < 0)
    continue; // ignore UNDEF indices
  if ((unsigned)M[i] != 2 * i + WhichResult)
    return false;
}

// isTRNMask
WhichResult = (M[0] == 0 ? 0 : 1);
for (unsigned i = 0; i < NumElts; i += 2) {
  if ((M[i] >= 0 && (unsigned)M[i] != i + WhichResult) ||
      (M[i + 1] >= 0 && (unsigned)M[i + 1] != i + NumElts + WhichResult))
    return false;
}

// isZIP_v_undef_Mask: special case of isZIPMask for the canonical form of
// "vector_shuffle v, v", i.e. both operands are the same vector.
WhichResult = (M[0] == 0 ? 0 : 1);
unsigned Idx = WhichResult * NumElts / 2;
for (unsigned i = 0; i != NumElts; i += 2) {
  if ((M[i] >= 0 && (unsigned)M[i] != Idx) ||
      (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx))
    return false;
  Idx += 1;
}

// isUZP_v_undef_Mask: special case of isUZPMask for the same canonical form.
WhichResult = (M[0] == 0 ? 0 : 1);
for (unsigned j = 0; j != 2; ++j) {
  unsigned Idx = WhichResult;
  for (unsigned i = 0; i != Half; ++i) {
    int MIdx = M[i + j * Half];
    if (MIdx >= 0 && (unsigned)MIdx != Idx)
      return false;
    Idx += 2;
  }
}

// isTRN_v_undef_Mask: special case of isTRNMask for the same canonical form.
WhichResult = (M[0] == 0 ? 0 : 1);
for (unsigned i = 0; i < NumElts; i += 2) {
  if ((M[i] >= 0 && (unsigned)M[i] != i + WhichResult) ||
      (M[i + 1] >= 0 && (unsigned)M[i + 1] != i + WhichResult))
    return false;
}
static bool isINSMask(ArrayRef<int> M, int NumInputElements,
                      bool &DstIsLeft, int &Anomaly) {
  if (M.size() != static_cast<size_t>(NumInputElements))
    return false;

  int NumLHSMatch = 0, NumRHSMatch = 0;
  int LastLHSMismatch = -1, LastRHSMismatch = -1;

  for (int i = 0; i < NumInputElements; ++i) {
    if (M[i] == i)
      ++NumLHSMatch;
    else
      LastLHSMismatch = i;

    if (M[i] == i + NumInputElements)
      ++NumRHSMatch;
    else
      LastRHSMismatch = i;
  }

  if (NumLHSMatch == NumInputElements - 1) {
    DstIsLeft = true;
    Anomaly = LastLHSMismatch;
    return true;
  } else if (NumRHSMatch == NumInputElements - 1) {
    DstIsLeft = false;
    Anomaly = LastRHSMismatch;
    return true;
  }

  return false;
}

for (int I = 0, E = NumElts / 2; I != E; I++) {

int Offset = NumElts / 2;
for (int I = NumElts / 2, E = NumElts; I != E; I++) {
  if (Mask[I] != I + SplitLHS * Offset)
    return false;
unsigned OpNum = (PFEntry >> 26) & 0x0F;
unsigned LHSID = (PFEntry >> 13) & ((1 << 13) - 1);
unsigned RHSID = (PFEntry >> 0) & ((1 << 13) - 1);

if (OpNum == OP_COPY) {
  if (LHSID == (1 * 9 + 2) * 9 + 3)
    return LHS;
  assert(LHSID == ((4 * 9 + 5) * 9 + 6) * 9 + 7 && "Illegal OP_COPY!");
  return RHS;
}

  return DAG.getNode(Opcode, dl, VT, OpLHS, Lane);

unsigned Imm = (OpNum - OP_VEXT1 + 1) * getExtFactor(OpLHS);
for (int Val : ShuffleMask) {
  for (unsigned Byte = 0; Byte < BytesPerElt; ++Byte) {
    unsigned Offset = Byte + Val * BytesPerElt;

unsigned IndexLen = 8;

if (IndexLen == 8) {

Lane += cast<ConstantSDNode>(V1.getOperand(1))->getZExtValue();
bool ReverseEXT = false;
if (isEXTMask(ShuffleMask, VT, ReverseEXT, Imm)) {

unsigned WhichResult;
if (isZIPMask(ShuffleMask, VT, WhichResult)) {
if (isUZPMask(ShuffleMask, VT, WhichResult)) {
if (isTRNMask(ShuffleMask, VT, WhichResult)) {

if (isINSMask(ShuffleMask, NumInputElements, DstIsLeft, Anomaly)) {
  int SrcLane = ShuffleMask[Anomaly];
  if (SrcLane >= NumInputElements) {

unsigned PFIndexes[4];
for (unsigned i = 0; i != 4; ++i) {
  if (ShuffleMask[i] < 0)
    PFIndexes[i] = 8;
  else
    PFIndexes[i] = ShuffleMask[i];
}

// Compute the index in the perfect shuffle table.
unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 +
                        PFIndexes[2] * 9 + PFIndexes[3];
unsigned Cost = (PFEntry >> 30);
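// --- Standalone sketch of the perfect-shuffle bookkeeping above: a
// 4-element mask (with 8 standing in for undef, hence base 9) is packed into
// a table index, and each 32-bit table entry carries cost and operand IDs in
// the bit fields decoded in GeneratePerfectShuffle.
#include <cassert>

static unsigned pfTableIndex(const int M[4]) {
  unsigned Idx[4];
  for (unsigned i = 0; i != 4; ++i)
    Idx[i] = M[i] < 0 ? 8 : (unsigned)M[i];
  return Idx[0] * 9 * 9 * 9 + Idx[1] * 9 * 9 + Idx[2] * 9 + Idx[3];
}

static void decodePFEntry(unsigned PFEntry, unsigned &Cost, unsigned &OpNum,
                          unsigned &LHSID, unsigned &RHSID) {
  Cost = PFEntry >> 30;                    // 2-bit cost
  OpNum = (PFEntry >> 26) & 0x0F;          // 4-bit operation
  LHSID = (PFEntry >> 13) & ((1 << 13) - 1);
  RHSID = (PFEntry >> 0) & ((1 << 13) - 1);
}

int main() {
  const int M[4] = {0, 1, 2, 3};           // identity mask
  assert(pfTableIndex(M) == ((0 * 9 + 1) * 9 + 2) * 9 + 3);
}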
APInt SplatBits, SplatUndef;
unsigned SplatBitSize;
bool HasAnyUndefs;
if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
  for (unsigned i = 0; i < NumSplats; ++i) {
    CnstBits <<= SplatBitSize;
    UndefBits <<= SplatBitSize;
    CnstBits |= SplatBits.zextOrTrunc(VT.getSizeInBits());
    UndefBits |= (SplatBits ^ SplatUndef).zextOrTrunc(VT.getSizeInBits());
                                 const SDValue *LHS = nullptr) {

  bool isAdvSIMDModImm = false;

  if (isAdvSIMDModImm) {
    if (LHS)
      Mov = DAG.getNode(NewOp, dl, MovTy, *LHS,
    else
      Mov = DAG.getNode(NewOp, dl, MovTy,

                                 const SDValue *LHS = nullptr) {

  bool isAdvSIMDModImm = false;

  if (isAdvSIMDModImm) {
    if (LHS)
      Mov = DAG.getNode(NewOp, dl, MovTy, *LHS,
    else
      Mov = DAG.getNode(NewOp, dl, MovTy,

  bool isAdvSIMDModImm = false;

  if (isAdvSIMDModImm) {

  bool isAdvSIMDModImm = false;

  if (isAdvSIMDModImm) {
UndefBits = ~UndefBits;
                          UndefBits, &LHS)) ||

                                  uint64_t &ConstVal) {

for (unsigned i = 1; i < NumElts; ++i)
  if (dyn_cast<ConstantSDNode>(Bvec->getOperand(i)) != FirstElt)
    return false;

unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();

if (C2 > ElemSizeInBits)
unsigned ElemMask = (1 << ElemSizeInBits) - 1;
if ((C1 & ElemMask) != (~C2 & ElemMask))
                          UndefBits, &LHS)) ||

if (auto *CstLane = dyn_cast<ConstantSDNode>(Lane)) {
                        CstLane->getZExtValue());
} else if (Lane.getNode()->isUndef()) {
       "Unexpected BUILD_VECTOR operand type");

  DefBits = UndefBits;

  DefBits = ~UndefBits;

    Const->getAPIntValue().zextOrTrunc(BitSize).getZExtValue());
if (Val.isNullValue() || Val.isAllOnesValue())
bool isOnlyLowElement = true;
bool usesOnlyOneValue = true;
bool usesOnlyOneConstantValue = true;
bool isConstant = true;
bool AllLanesExtractElt = true;
unsigned NumConstantLanes = 0;

for (unsigned i = 0; i < NumElts; ++i) {
    AllLanesExtractElt = false;

    isOnlyLowElement = false;
  if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
    isConstant = false;

  if (isa<ConstantSDNode>(V) || isa<ConstantFPSDNode>(V)) {
    ++NumConstantLanes;
    else if (ConstantValue != V)
      usesOnlyOneConstantValue = false;
  }

  else if (V != Value)
    usesOnlyOneValue = false;
}

  LLVM_DEBUG(
      dbgs() << "LowerBUILD_VECTOR: value undefined, creating undef node\n");

if (isOnlyLowElement && !(NumElts == 1 && isa<ConstantSDNode>(Value))) {
  LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: only low element used, creating 1 "
                       "SCALAR_TO_VECTOR node\n");

if (AllLanesExtractElt) {
  SDNode *Vector = nullptr;

  for (unsigned i = 0; i < NumElts; ++i) {

    } else if (Vector != N0.getNode()) {

    if (Val - 1 == 2 * i) {

if (usesOnlyOneValue) {
  LLVM_DEBUG(
      dbgs() << "LowerBUILD_VECTOR: use DUP for non-constant splats\n");

  LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: DUPLANE works on 128-bit vectors, "

  return DAG.getNode(Opcode, dl, VT, Value, Lane);

       "Unsupported floating-point vector type");
LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: float constant splats, creating int "
                     "BITCASTS, and try again\n");

for (unsigned i = 0; i < NumElts; ++i)

LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: trying to lower new vector: ";
Val = LowerBUILD_VECTOR(Val, DAG);

if (NumConstantLanes > 0 && usesOnlyOneConstantValue) {

  for (unsigned i = 0; i < NumElts; ++i) {
    if (!isa<ConstantSDNode>(V) && !isa<ConstantFPSDNode>(V))

LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: all elements are constant, use "
                     "default expansion\n");

if (!isConstant && !usesOnlyOneValue) {
  LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: alternatives failed, creating "
                       "sequence of INSERT_VECTOR_ELT\n");

  LLVM_DEBUG(dbgs() << "Creating node for op0, it is not undefined:\n");

  LLVM_DEBUG(dbgs() << "Creating nodes for the other vector elements:\n";);
  for (; i < NumElts; ++i) {

LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: use default expansion, failed to "
                     "find better alternative\n");
SDValue AArch64TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
                                                      SelectionDAG &DAG) const {

  // Check for a non-constant or out-of-range lane.
  if (!CI || CI->getZExtValue() >= VT.getVectorNumElements())
    return SDValue();

SDValue
AArch64TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                               SelectionDAG &DAG) const {

  // Check for a non-constant or out-of-range lane.
  if (!CI || CI->getZExtValue() >= VT.getVectorNumElements())
    return SDValue();

SDValue AArch64TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                      SelectionDAG &DAG) const {

unsigned PFIndexes[4];
for (unsigned i = 0; i != 4; ++i) {
  if (M[i] < 0)
    PFIndexes[i] = 8;
  else
    PFIndexes[i] = M[i];
}

// Compute the index in the perfect shuffle table.
unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 +
                        PFIndexes[2] * 9 + PFIndexes[3];
unsigned Cost = (PFEntry >> 30);
unsigned DummyUnsigned;

        isEXTMask(M, VT, DummyBool, DummyUnsigned) ||

APInt SplatBits, SplatUndef;
unsigned SplatBitSize;
bool HasAnyUndefs;
if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
                                  HasAnyUndefs, ElementBits) ||
    SplatBitSize > ElementBits)
  return false;

// isVShiftLImm - Check if this is a valid build_vector for the immediate
// operand of a vector shift left operation. That value must be in the range:
//   0 <= Value < ElementBits
  assert(VT.isVector() && "vector shift count is not a vector type");
  return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits);

// isVShiftRImm - Check if this is a valid build_vector for the immediate
// operand of a vector shift right operation. The value must be in the range:
//   1 <= Value <= ElementBits (or ElementBits / 2 for a narrowing shift).
  assert(VT.isVector() && "vector shift count is not a vector type");
  return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits));
SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op,
                                                      SelectionDAG &DAG) const {

  return NegShiftLeft;

       "function only supposed to emit natural comparisons");

bool IsZero = IsCnst && (CnstBits == 0);

const bool FullFP16 =
if (!Subtarget.hasLSE())
  return SDValue();

if (!Subtarget.hasLSE())
  return SDValue();

SDValue AArch64TargetLowering::LowerWindowsDYNAMIC_STACKALLOC(

  if (Subtarget->hasCustomCallingConv())

SDValue
AArch64TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                               SelectionDAG &DAG) const {
  assert(Subtarget->isTargetWindows() &&
         "Only Windows alloca probing supported");

  unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();

                           "no-stack-arg-probe")) {

  Chain = LowerWindowsDYNAMIC_STACKALLOC(Op, Chain, Size, DAG);
bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                               const CallInst &I,
                                               MachineFunction &MF,
                                               unsigned Intrinsic) const {
  switch (Intrinsic) {

  uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;

  unsigned NumElts = 0;
    NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
if (ShiftAmount == Log2_32(LoadBytes))

return NumBits1 > NumBits2;

return NumBits1 > NumBits2;

if (I->getOpcode() != Instruction::FMul)
  return true;

if (!(User->getOpcode() == Instruction::FSub ||
      User->getOpcode() == Instruction::FAdd))
  return true;

return NumBits1 == 32 && NumBits2 == 64;

return NumBits1 == 32 && NumBits2 == 64;
bool AArch64TargetLowering::isExtFreeImpl(const Instruction *Ext) const {
  if (isa<FPExtInst>(Ext))
    return false;

  for (const Use &U : Ext->uses()) {

    const Instruction *Instr = cast<Instruction>(U.getUser());

    case Instruction::Shl:

    case Instruction::GetElementPtr: {

      std::advance(GTI, U.getOperandNo() - 1);

      if (ShiftAmt == 0 || ShiftAmt > 4)
        return false;

    case Instruction::Trunc:

                                          unsigned &RequiredAligment) const {

  RequiredAligment = 0;

  return NumBits == 32 || NumBits == 64;
if (ElSize != 8 && ElSize != 16 && ElSize != 32 && ElSize != 64)
  return false;

// The total vector size must be 64 bits or a multiple of 128 bits for the
// ldN/stN instructions.
return VecSize == 64 || VecSize % 128 == 0;

assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
       "Invalid interleave factor");
assert(!Shuffles.empty() && "Empty shufflevector input");
assert(Shuffles.size() == Indices.size() &&
       "Unmatched number of shufflevectors and indices");

Type *Tys[2] = {VecTy, PtrTy};

for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) {

  for (unsigned i = 0; i < Shuffles.size(); i++) {
    unsigned Index = Indices[i];

    SubVecs[SVI].push_back(SubVec);

  auto &SubVec = SubVecs[SVI];

  SVI->replaceAllUsesWith(WideVec);
bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
                                                  ShuffleVectorInst *SVI,
                                                  unsigned Factor) const {
  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
         "Invalid interleave factor");

         "Invalid interleaved store");

  if (NumStores > 1) {
    LaneLen /= NumStores;

  Type *Tys[2] = {SubVecTy, PtrTy};

  for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) {

    for (unsigned i = 0; i < Factor; i++) {
      unsigned IdxI = StoreCount * LaneLen * Factor + i;
      if (Mask[IdxI] >= 0) {

        unsigned StartMask = 0;
        for (unsigned j = 1; j < LaneLen; j++) {
          unsigned IdxJ = StoreCount * LaneLen * Factor + j;
          if (Mask[IdxJ * Factor + IdxI] >= 0) {
            StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ;
static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign,
                       unsigned AlignCheck) {
  return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) &&
          (DstAlign == 0 || DstAlign % AlignCheck == 0));
}

    uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,

bool CanUseNEON = Subtarget->hasNEON() && CanImplicitFloat;
bool CanUseFP = Subtarget->hasFPARMv8() && CanImplicitFloat;

// Only use AdvSIMD for memsets of 32 bytes and above: below that, it costs
// an instruction to materialize the v2i64 zero for little benefit.
bool IsSmallMemset = IsMemset && Size < 32;
auto AlignmentIsAcceptable = [&](EVT VT, unsigned AlignCheck) {
  if (memOpAlign(SrcAlign, DstAlign, AlignCheck))
    return true;

if (CanUseNEON && IsMemset && !IsSmallMemset &&
if (CanUseFP && !IsSmallMemset && AlignmentIsAcceptable(MVT::f128, 16))
if (Size >= 8 && AlignmentIsAcceptable(MVT::i64, 8))
if (Size >= 4 && AlignmentIsAcceptable(MVT::i32, 4))
// Avoid UB when negating INT64_MIN below.
if (Immed == std::numeric_limits<int64_t>::min()) {
  LLVM_DEBUG(dbgs() << "Illegal add imm " << Immed
                    << ": avoid UB for INT64_MIN\n");
  return false;
}
bool IsLegal = ((Immed >> 12) == 0 ||
                ((Immed & 0xfff) == 0 && Immed >> 24 == 0));
LLVM_DEBUG(dbgs() << "Is " << Immed
                  << " legal add imm: " << (IsLegal ? "yes" : "no") << "\n");
return IsLegal;

uint64_t NumBytes = 0;
  NumBytes = NumBits / 8;

// A 9-bit signed offset covers the unscaled (LDUR/STUR) forms.
if (isInt<9>(Offset))
  return true;

// Otherwise the offset must be a positive multiple of the access size that
// fits the scaled 12-bit unsigned immediate.
unsigned shift = Log2_64(NumBytes);
if (NumBytes && Offset > 0 && (Offset / NumBytes) <= (1LL << 12) - 1 &&
    (Offset >> shift) << shift == Offset)
  return true;

return AM.Scale == 1 || (AM.Scale > 0 && (uint64_t)AM.Scale == NumBytes);
                                             unsigned AS) const {

// LR is a callee-save register, but it is clobbered by calls, so include it
// in the scratch set used for stackmaps and patchpoints.
static const MCPhysReg ScratchRegs[] = {
  AArch64::X16, AArch64::X17, AArch64::LR, 0
};
return ScratchRegs;

if ((int64_t)Val < 0)
  Val = ~Val;
Val &= (1LL << 32) - 1;

unsigned Shift = (63 - LZ) / 16;

                                             unsigned Index) const {

if (!ShiftAmt || ShiftAmt->getZExtValue() != ShiftEltTy.getSizeInBits() - 1)
SDValue
AArch64TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                     SelectionDAG &DAG,
                                     SmallVectorImpl<SDNode *> &Created) const {

      !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
    return SDValue();
unsigned TrailingZeroes = ConstValue.countTrailingZeros();
if (TrailingZeroes) {

// Use ShiftedConstValue instead of ConstValue to support both shift+add/sub
// and shift+add+shift.
APInt ShiftedConstValue = ConstValue.ashr(TrailingZeroes);

unsigned ShiftAmt, AddSubOpc;
// Is the shifted value the LHS operand of the add/sub?
bool ShiftValUseIsN0 = true;
// Do we need to negate the result?
bool NegateResult = false;

if (ConstValue.isNonNegative()) {
  // (mul x, 2^N + 1) => (add (shl x, N), x)
  // (mul x, 2^N - 1) => (sub (shl x, N), x)
  APInt SCVMinus1 = ShiftedConstValue - 1;
  APInt CVPlus1 = ConstValue + 1;
} else {
  // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
  // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
  APInt CVNegPlus1 = -ConstValue + 1;
  APInt CVNegMinus1 = -ConstValue - 1;
    ShiftValUseIsN0 = false;
    NegateResult = true;
}

SDValue AddSubN0 = ShiftValUseIsN0 ? ShiftedVal : N0;
SDValue AddSubN1 = ShiftValUseIsN0 ? N0 : ShiftedVal;
SDValue Res = DAG.getNode(AddSubOpc, DL, VT, AddSubN0, AddSubN1);
assert(!(NegateResult && TrailingZeroes) &&
       "NegateResult and TrailingZeroes cannot both be true for now.");
if (!BV->isConstant())
  return SDValue();

EVT IntVT = BV->getValueType(0);

if (!isa<BuildVectorSDNode>(ConstVec))
  return SDValue();

if (FloatBits != 32 && FloatBits != 64)
  return SDValue();

if (IntBits != 16 && IntBits != 32 && IntBits != 64)
  return SDValue();

// Avoid conversions where the integer is wider than the float.
if (IntBits > FloatBits)
  return SDValue();

int32_t Bits = IntBits == 64 ? 64 : 32;
if (C == -1 || C == 0 || C > Bits)
  return SDValue();

       "Illegal vector type after legalization");

if (IntBits < FloatBits)

if (!isa<BuildVectorSDNode>(ConstVec))
  return SDValue();

if (IntBits != 16 && IntBits != 32 && IntBits != 64)
  return SDValue();

if (FloatBits != 32 && FloatBits != 64)
  return SDValue();

if (IntBits > FloatBits)
  return SDValue();

if (C == -1 || C == 0 || C > FloatBits)
  return SDValue();

if (IntBits < FloatBits)
bool LHSFromHi = false;

bool RHSFromHi = false;

// If both halves try to come from the high part of the register, this is not
// really an EXTR.
if (LHSFromHi == RHSFromHi)
  return SDValue();

uint64_t BitMask = Bits == 64 ? -1ULL : ((1ULL << Bits) - 1);
for (int i = 1; i >= 0; --i)
  for (int j = 1; j >= 0; --j) {

    bool FoundMatch = true;

        CN0->getZExtValue() != (BitMask & ~CN1->getZExtValue())) {
uint64_t ShiftAmt = C->getZExtValue();
if (VT == MVT::i32 && ShiftAmt == 16 &&

if (VT == MVT::i64 && ShiftAmt == 32 &&

uint64_t idx = cast<ConstantSDNode>(Op0->getOperand(1))->getZExtValue();

if (idx != AArch64::dsub)
  return SDValue();

LLVM_DEBUG(
    dbgs() << "aarch64-lower: bitcast extract_subvector simplification\n");
for (size_t i = 0; i < Mask.size(); ++i)

LLVM_DEBUG(
    dbgs() << "aarch64-lower: concat_vectors bitcast simplification\n");

       "unexpected vector size on extract_vector_elt!");

    cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());

if (!TValue || !FValue)
  return false;

if (!TValue->isOne()) {

return TValue->isOne() && FValue->isNullValue();

       "unexpected shape for long operation");
int64_t ShiftAmount;

APInt SplatValue, SplatUndef;
unsigned SplatBitSize;
bool HasAnyUndefs;
if (!BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
                          HasAnyUndefs, ElemBits) ||
    SplatBitSize != ElemBits)
  return SDValue();

ShiftAmount = CVN->getSExtValue();

  IsRightShift = false;
  break;
  IsRightShift = false;
  break;
  IsRightShift = true;
  break;
  IsRightShift = true;
  break;
  IsRightShift = false;
  break;

if (IsRightShift && ShiftAmount <= -1 && ShiftAmount >= -(int)ElemBits) {
} else if (!IsRightShift && ShiftAmount >= 0 && ShiftAmount < ElemBits) {
assert(!(NumElements & 1) && "Splitting vector, but not in half!");

                               SDValue SplatVal, unsigned NumVecElts) {

uint64_t BaseOffset = 0;

if (BasePtr->getOpcode() == ISD::ADD &&
    isa<ConstantSDNode>(BasePtr->getOperand(1))) {
  BaseOffset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
  BasePtr = BasePtr->getOperand(0);
}

unsigned Offset = EltOffset;
while (--NumVecElts) {
  unsigned Alignment = MinAlign(OrigAlignment, Offset);

                          PtrInfo.getWithOffset(Offset), Alignment,

  Offset += EltOffset;
}

if (!(((NumVecElts == 2 || NumVecElts == 3) &&
      ((NumVecElts == 2 || NumVecElts == 3 || NumVecElts == 4) &&

// The replacement stores must stay within the scaled signed 7-bit offset
// range of STP.
if (Offset < -512 || Offset > 504)
  return SDValue();

for (int I = 0; I < NumVecElts; ++I) {

  ZeroReg = AArch64::WZR;
  ZeroReg = AArch64::XZR;

if (NumVecElts != 4 && NumVecElts != 2)
  return SDValue();

std::bitset<4> IndexNotInserted((1 << NumVecElts) - 1);

for (unsigned I = 0; I < NumVecElts; ++I) {

  if (IndexVal >= NumVecElts)
    return SDValue();
  IndexNotInserted.reset(IndexVal);

// Every lane of the vector must have been written by an insert.
if (IndexNotInserted.any())
  return SDValue();

return ReplacedZeroSplat;

return ReplacedSplat;
unsigned LoadIdx = IsLaneOp ? 1 : 0;

// Skip uses of the chain result.
if (UI.getUse().getResNo() == 1)
  continue;

    || UI.getUse().getResNo() != Addr.getResNo())

uint32_t IncVal = CInc->getZExtValue();
if (IncVal != NumBytes)
  continue;

if (TLI.SimplifyDemandedBits(Addr, DemandedMask, Known, TLO)) {

    UI.getUse().getResNo() != Addr.getResNo())

bool IsStore = false;
bool IsLaneOp = false;
bool IsDupOp = false;
unsigned NewOpc = 0;
unsigned NumVecs = 0;
unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
switch (IntNo) {
case Intrinsic::aarch64_neon_ld2:     NumVecs = 2; break;
case Intrinsic::aarch64_neon_ld3:     NumVecs = 3; break;
case Intrinsic::aarch64_neon_ld4:     NumVecs = 4; break;
case Intrinsic::aarch64_neon_st2:     NumVecs = 2; IsStore = true; break;
case Intrinsic::aarch64_neon_st3:     NumVecs = 3; IsStore = true; break;
case Intrinsic::aarch64_neon_st4:     NumVecs = 4; IsStore = true; break;
case Intrinsic::aarch64_neon_ld1x2:   NumVecs = 2; break;
case Intrinsic::aarch64_neon_ld1x3:   NumVecs = 3; break;
case Intrinsic::aarch64_neon_ld1x4:   NumVecs = 4; break;
case Intrinsic::aarch64_neon_st1x2:   NumVecs = 2; IsStore = true; break;
case Intrinsic::aarch64_neon_st1x3:   NumVecs = 3; IsStore = true; break;
case Intrinsic::aarch64_neon_st1x4:   NumVecs = 4; IsStore = true; break;
case Intrinsic::aarch64_neon_ld2r:    NumVecs = 2; IsDupOp = true; break;
case Intrinsic::aarch64_neon_ld3r:    NumVecs = 3; IsDupOp = true; break;
case Intrinsic::aarch64_neon_ld4r:    NumVecs = 4; IsDupOp = true; break;
case Intrinsic::aarch64_neon_ld2lane: NumVecs = 2; IsLaneOp = true; break;
case Intrinsic::aarch64_neon_ld3lane: NumVecs = 3; IsLaneOp = true; break;
case Intrinsic::aarch64_neon_ld4lane: NumVecs = 4; IsLaneOp = true; break;
case Intrinsic::aarch64_neon_st2lane:
  NumVecs = 2; IsStore = true; IsLaneOp = true; break;
case Intrinsic::aarch64_neon_st3lane:
  NumVecs = 3; IsStore = true; IsLaneOp = true; break;
case Intrinsic::aarch64_neon_st4lane:
  NumVecs = 4; IsStore = true; IsLaneOp = true; break;
}

uint32_t IncVal = CInc->getZExtValue();
// Lane and dup operations only move one element's worth of bytes per vector.
if (IsLaneOp || IsDupOp)
if (IncVal != NumBytes)
  continue;

if (IsLaneOp || IsStore)
  for (unsigned i = 2; i < AddrOpIdx; ++i)

unsigned NumResultVecs = (IsStore ? 0 : NumVecs);
for (n = 0; n < NumResultVecs; ++n)

std::vector<SDValue> NewResults;
for (unsigned i = 0; i < NumResultVecs; ++i) {
  NewResults.push_back(SDValue(UpdN.getNode(), i));
}
NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1));
    1LL << (width - 1);

static bool isEquivalentMaskless(unsigned CC, unsigned width,
                                 ISD::LoadExtType ExtType, int AddConstant,
                                 int CompConstant) {
  // Written in terms of symbolic values and well-known constants
  // (0, 1, -1, MaxUInt) so the equations apply at any bit width.
  int MaxUInt = (1 << width);

  // Sign-extending the loaded value is equivalent to zero-extending it and
  // displacing the add by half the integer width.
  if (ExtType == ISD::SEXTLOAD)
    AddConstant -= (1 << (width - 1));

  if ((AddConstant == 0) ||
      (CompConstant == MaxUInt - 1 && AddConstant < 0) ||
      (AddConstant >= 0 && CompConstant < 0) ||
      (AddConstant <= 0 && CompConstant <= 0 && CompConstant < AddConstant))
    return true;

  if ((AddConstant == 0) ||
      (AddConstant >= 0 && CompConstant <= 0) ||
      (AddConstant <= 0 && CompConstant <= 0 && CompConstant <= AddConstant))
    return true;

  if ((AddConstant >= 0 && CompConstant < 0) ||
      (AddConstant <= 0 && CompConstant >= -1 &&
       CompConstant < AddConstant + MaxUInt))
    return true;

  if ((AddConstant == 0) ||
      (AddConstant > 0 && CompConstant <= 0) ||
      (AddConstant < 0 && CompConstant <= AddConstant))
    return true;

  if ((AddConstant >= 0 && CompConstant <= 0) ||
      (AddConstant <= 0 && CompConstant >= 0 &&
       CompConstant <= AddConstant + MaxUInt))
    return true;

  if ((AddConstant > 0 && CompConstant < 0) ||
      (AddConstant < 0 && CompConstant >= 0 &&
       CompConstant < AddConstant + MaxUInt) ||
      (AddConstant >= 0 && CompConstant >= 0 &&
       CompConstant >= AddConstant) ||
      (AddConstant <= 0 && CompConstant < 0 && CompConstant < AddConstant))
    return true;
                                 unsigned CmpIndex) {
  unsigned CC = cast<ConstantSDNode>(N->getOperand(CCIndex))->getSExtValue();

  unsigned CondOpcode = SubsNode->getOpcode();

  unsigned MaskBits = 0;

  uint32_t CNV = CN->getZExtValue();
  if (CNV == 255)
    MaskBits = 8;
  else if (CNV == 65535)
    MaskBits = 16;

  if (!isa<ConstantSDNode>(AddInputValue2.getNode()) ||
      !isa<ConstantSDNode>(SubsInputValue.getNode()))
    return SDValue();

  if (!isEquivalentMaskless(CC, MaskBits, ExtType,
          cast<ConstantSDNode>(AddInputValue2.getNode())->getSExtValue(),
          cast<ConstantSDNode>(SubsInputValue.getNode())->getSExtValue()))
    return SDValue();
assert(isa<ConstantSDNode>(CCVal) && "Expected a ConstantSDNode here!");
unsigned CC = cast<ConstantSDNode>(CCVal)->getZExtValue();

       "Expected the value type to be the same for both operands!");

// The bit test survives an OR when the tested bit of the mask is clear.
if ((C->getZExtValue() >> Bit) & 1)
  return Op;

// Testing bit B of (shl x, c) is testing bit B - c of x.
if (C->getZExtValue() <= Bit &&
    (Bit - C->getZExtValue()) < Op->getValueType(0).getSizeInBits()) {
  Bit = Bit - C->getZExtValue();

// For an arithmetic shift right, clamp to the sign bit.
Bit = Bit + C->getZExtValue();
if (Bit >= Op->getValueType(0).getSizeInBits())
  Bit = Op->getValueType(0).getSizeInBits() - 1;

// Testing bit B of (srl x, c) is testing bit B + c of x.
if ((Bit + C->getZExtValue()) < Op->getValueType(0).getSizeInBits()) {
  Bit = Bit + C->getZExtValue();

// An XOR flips the tested bit when the mask has that bit set.
if ((C->getZExtValue() >> Bit) & 1)
unsigned Bit = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
bool Invert = false;

if (TestSrc == NewTestSrc)
  return SDValue();

                     cast<CondCodeSDNode>(N0.getOperand(2))->get());

       "Scalar-SETCC feeding SELECT has unexpected result type!");

if (!ResVT.isVector() || NumMaskElts == 0)
  return SDValue();

auto *GN = cast<GlobalAddressSDNode>(N);

uint64_t MinOffset = -1ull;
  MinOffset = std::min(MinOffset, C->getZExtValue());

uint64_t Offset = MinOffset + GN->getOffset();

// Reject wrap-around: the folded offset must be strictly larger than the
// offset already on the node.
if (Offset <= uint64_t(GN->getOffset()))
  return SDValue();

// The folded offset must stay within the range reachable from an
// ADRP-relative address.
if (Offset >= (1 << 21))
  return SDValue();
bool AArch64TargetLowering::isUsedByReturnOnly(SDNode *N,

  bool HasRet = false;

bool AArch64TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {

int64_t RHSC = RHS->getSExtValue();
  RHSC = -(uint64_t)RHSC;
if (!isInt<9>(RHSC))
  return false;

bool AArch64TargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    Ptr = LD->getBasePtr();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    Ptr = ST->getBasePtr();
  }

  if (!getIndexedAddressParts(Ptr.getNode(), Base, Offset, AM, IsInc, DAG))
    return false;

bool AArch64TargetLowering::getPostIndexedAddressParts(

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    Ptr = LD->getBasePtr();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    Ptr = ST->getBasePtr();
  }

  if (!getIndexedAddressParts(Op, Base, Offset, AM, IsInc, DAG))
    return false;
                                        unsigned AcrossOp) {

return std::make_pair(Lo, Hi);

const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 };

       "AtomicCmpSwap on types less than 128 should be legal");

if (Subtarget->hasLSE()) {
  // With LSE, select the CASP variant matching the required memory ordering
  // (plain, acquire, release, or acquire-release).
    Opcode = AArch64::CASPX;
    Opcode = AArch64::CASPAX;
    Opcode = AArch64::CASPLX;
    Opcode = AArch64::CASPALX;

unsigned SubReg1 = AArch64::sube64, SubReg2 = AArch64::subo64;

                   AArch64::CMP_SWAP_128, SDLoc(N),
void AArch64TargetLowering::ReplaceNodeResults(

if (Subtarget->isTargetAndroid() || Subtarget->isTargetFuchsia())

unsigned AArch64TargetLowering::combineRepeatedFPDivisors() const {

return Size == 128;

if (Subtarget->hasLSE())

Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();

    cast<PointerType>(Addr->getType())->getElementType());

bool AArch64TargetLowering::shouldNormalizeToSelectSequence(LLVMContext &,

if (Subtarget->isTargetAndroid())

if (Subtarget->isTargetFuchsia())

if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment()) {

  auto *SecurityCheckCookie = cast<Function>(

  SecurityCheckCookie->addAttribute(1, Attribute::AttrKind::InReg);

if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())

if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())

if (Subtarget->isTargetAndroid())

if (Subtarget->isTargetFuchsia())

if (AArch64::GPR64RegClass.contains(*I))
  RC = &AArch64::GPR64RegClass;
else if (AArch64::FPR64RegClass.contains(*I))
  RC = &AArch64::FPR64RegClass;

       "Function should be nounwind in insertCopiesSplitCSR!");

for (auto *Exit : Exits)
          TII->get(TargetOpcode::COPY), *I)

if (Subtarget->isTargetDarwin() || Subtarget->isTargetWindows())

void AArch64TargetLowering::finalizeLowering(MachineFunction &MF) const {
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
std::pair< Value *, Value * > ShuffleOps
We are building a shuffle to create V, which is a sequence of insertelement, extractelement pairs...
static bool isAdvSIMDModImmType6(uint64_t Imm)
Type * getVectorElementType() const
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt)
isVShiftLImm - Check if this is a valid build_vector for the immediate operand of a vector shift left...
void setFrameAddressIsTaken(bool T)
void setAllowReassociation(bool b)
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
constexpr bool isUInt< 32 >(uint64_t x)
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned char TargetFlags=0)
Value * getValueOperand()
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set, or Regs.size() if they are all allocated.
Helper structure to keep track of SetCC information.
static MVT getIntegerVT(unsigned BitWidth)
static bool isUZP_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isUZP_v_undef_Mask - Special case of isUZPMask for canonical form of "vector_shuffle v...
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
getTgtMemIntrinsic - Represent NEON load and store intrinsics as MemIntrinsicNodes.
A parsed version of the target data layout string in and methods for querying it. ...
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
static SDValue performBitcastCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
EVT getValueType() const
Return the ValueType of the referenced return value.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
MO_G3 - A symbol operand with this flag (granule 3) represents the high 16-bits of a 64-bit address...
Value * CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const Twine &Name="")
static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG)
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
iterator_range< use_iterator > uses()
static bool isConstant(const MachineInstr &MI)
C - The default llvm calling convention, compatible with C.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand...
bool isLittleEndian() const
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
const GlobalValue * getGlobal() const
uint64_t getZExtValue() const
Get zero extended value.
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
static uint8_t encodeAdvSIMDModImmType3(uint64_t Imm)
This class represents an incoming formal argument to a Function.
LLVMContext & getContext() const
bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for this result type with this index.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
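For illustration, a minimal sketch of calling this helper from a custom lowering hook; DAG, DL, LHS, and RHS are assumed in scope, and the result type comes from getSetCCResultType (listed elsewhere on this page):
  EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                                LHS.getValueType());
  SDValue Cmp = DAG.getSetCC(DL, CCVT, LHS, RHS, ISD::SETLT); // LHS < RHS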
void setVarArgsGPRSize(unsigned Size)
bool isMisaligned128StoreSlow() const
bool isIndexed() const
Return true if this is a pre/post inc/dec load/store.
SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd)...
static SDValue performVSelectCombine(SDNode *N, SelectionDAG &DAG)
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector...
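A hedged sketch of the usual calling pattern, assuming Op is an SDValue already known to be a BUILD_VECTOR:
  APInt SplatValue, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  auto *BVN = cast<BuildVectorSDNode>(Op.getNode());
  if (BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
                           HasAnyUndefs) &&
      SplatBitSize <= 64)
    // Every defined element repeats the same SplatBitSize-bit pattern,
    // which is returned in SplatValue.
    dbgs() << "constant splat: " << SplatValue << "\n";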
static SDValue performNEONPostLDSTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
Target-specific DAG combine function for NEON load/store intrinsics to merge base address updates...
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR (a vector value) starting with the ...
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
BR_CC - Conditional branch.
This class represents lattice values for constants.
static bool isUZPMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
MO_PAGE - A symbol operand with this flag represents the pc-relative offset of the 4K page containing...
static bool isTRN_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isTRN_v_undef_Mask - Special case of isTRNMask for canonical form of "vector_shuffle v...
Type * getParamType(unsigned i) const
Parameter type accessors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0...
Constant * getOrInsertFunction(StringRef Name, FunctionType *T, AttributeList AttributeList)
Look up the specified function in the module symbol table.
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool hasCustomCallingConv() const
static int getFP16Imm(const APInt &Imm)
getFP16Imm - Return an 8-bit floating-point version of the 16-bit floating-point value.
LLVM_NODISCARD bool equals_lower(StringRef RHS) const
equals_lower - Check for string equality, ignoring case.
A Module instance is used to store all the information related to an LLVM module. ...
bool isSized(SmallPtrSetImpl< Type *> *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const
static SDValue tryCombineToEXTR(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
EXTR instruction extracts a contiguous chunk of bits from two existing registers viewed as a high/low...
static bool isAdvSIMDModImmType12(uint64_t Imm)
int getVarArgsStackIndex() const
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
An instruction that atomically checks whether a specified value is in a memory location, and, if it is, stores a new value there.
int getSplatIndex() const
static bool isAdvSIMDModImmType4(uint64_t Imm)
bool isVector() const
Return true if this is a vector value type.
static SDValue performXorCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
static uint8_t encodeAdvSIMDModImmType1(uint64_t Imm)
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace=0, unsigned Align=1, bool *Fast=nullptr) const override
Returns true if the target allows unaligned memory accesses of the specified type.
const SDValue & getBasePtr() const
bool predictableSelectIsExpensive() const
Carry-setting nodes for multiple precision addition and subtraction.
void push_back(const T &Elt)
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
static bool isINSMask(ArrayRef< int > M, int NumInputElements, bool &DstIsLeft, int &Anomaly)
static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG)
unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
static CondCode getInvertedCondCode(CondCode Code)
unsigned getReg() const
getReg - Returns the register number.
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
void setVarArgsStackIndex(int Index)
bool isIntDivCheap(EVT VT, AttributeList Attr) const override
Return true if integer divide is usually cheaper than a sequence of several shifts, adds, and multiplies for this target.
MachineMemOperand::Flags flags
const SDValue & getValue() const
static bool isZIP_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isZIP_v_undef_Mask - Special case of isZIPMask for canonical form of "vector_shuffle v...
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
STACKRESTORE has two operands, an input chain and a pointer to restore to; it returns an output chain...
This class represents a function call, abstracting a target machine's calling convention.
unsigned char ClassifyGlobalReference(const GlobalValue *GV, const TargetMachine &TM) const
ClassifyGlobalReference - Find the target operand flags that describe how a global value should be re...
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Get a value with low bits set.
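A small self-contained example of the APInt mask builders (the comments show the resulting bit patterns):
  APInt Lo = APInt::getLowBitsSet(64, 12);  // 0x0000000000000FFF
  APInt Hi = APInt::getHighBitsSet(64, 16); // 0xFFFF000000000000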
static SDValue ConstantBuildVector(SDValue Op, SelectionDAG &DAG)
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit...
static SDValue splitStores(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change...
MachineBasicBlock * EmitLoweredCatchRet(MachineInstr &MI, MachineBasicBlock *BB) const
static MVT getFloatingPointVT(unsigned BitWidth)
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS) const override
Return the cost of the scaling factor used in the addressing mode represented by AM for this target...
The C convention as implemented on Windows/x86-64 and AArch64.
unsigned getVectorNumElements() const
const SDValue & getChain() const
This instruction constructs a fixed permutation of two input vectors.
static SDValue performSelectCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
A vector select: "(select vL, vR, (setcc LHS, RHS))" is best performed with the compare-mask instruct...
void setVarArgsFPRIndex(int Index)
static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N, SelectionDAG &DAG)
unsigned getAlignment() const
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
GlobalVariable * getGlobalVariable(StringRef Name) const
Look up the specified global variable in the module symbol table.
static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG)
STATISTIC(NumFunctions, "Total number of functions")
const TargetRegisterInfo * TRI
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
MO_G0 - A symbol operand with this flag (granule 0) represents the bits 0-15 of a 64-bit address...
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS)
Helper function to make it easier to build Select's if you just have operands and don't want to check...
static SDValue performExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
static SDValue NormalizeBuildVector(SDValue Op, SelectionDAG &DAG)
uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew=0)
Returns the next integer (mod 2**64) that is greater than or equal to Value and is a multiple of Alig...
An instruction for reading from memory.
static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC, const SDLoc &dl, SelectionDAG &DAG)
static unsigned getDUPLANEOp(EVT EltType)
static IntegerType * getInt64Ty(LLVMContext &C)
void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, const SDLoc &DL) const
Soften the operands of a comparison.
bool hasExternalWeakLinkage() const
static bool isAdvSIMDModImmType3(uint64_t Imm)
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
const SDNodeFlags getFlags() const
An instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned char TargetFlags=0)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
bool isVectorTy() const
True if this is an instance of VectorType.
static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG)
static SDValue performSetccAddFolding(SDNode *Op, SelectionDAG &DAG)
This defines the Use class.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS, AArch64CC::CondCode CC, bool NoNans, EVT VT, const SDLoc &dl, SelectionDAG &DAG)
bool shouldConsiderGEPOffsetSplit() const override
bool isOperationLegalOrCustom(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
static SDValue tryCombineFixedPointConvert(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1 at the ...
unsigned getBitWidth() const
Get the bit width of this value.
std::size_t countLeadingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the most significant bit to the least stopping at the first 1...
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
void setVarArgsFPRSize(unsigned Size)
static bool isZeroExtended(SDNode *N, SelectionDAG &DAG)
static std::pair< SDValue, SDValue > getAArch64XALUOOp(AArch64CC::CondCode &CC, SDValue Op, SelectionDAG &DAG)
unsigned getValueSizeInBits() const
Returns the size of the value in bits.
bool isAnyArgRegReserved(const MachineFunction &MF) const
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const SDLoc &dl)
GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit the specified operations t...
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it...
bool isCallingConvWin64(CallingConv::ID CC) const
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
bool hasAttribute(unsigned Index, Attribute::AttrKind Kind) const
Return true if the attribute exists at the given index.
Libcall getFPROUND(EVT OpVT, EVT RetVT)
getFPROUND - Return the FPROUND_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
bool isUnsignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs an unsigned comparison when used with intege...
static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG)
bool isTargetCOFF() const
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
return AArch64::GPR64RegClass.contains(Reg)
SDValue getExternalSymbol(const char *Sym, EVT VT)
bool isAllOnesValue() const
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
bool isTruncatingStore() const
Return true if the op does a truncation before store.
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode *> &Visited, SmallVectorImpl< const SDNode *> &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
unsigned getAddressSpace() const
Return the address space for the associated pointer.
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
Libcall getUINTTOFP(EVT OpVT, EVT RetVT)
getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
MO_G2 - A symbol operand with this flag (granule 2) represents the bits 32-47 of a 64-bit address...
unsigned getFrameRegister(const MachineFunction &MF) const override
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Value * getArgOperand(unsigned i) const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
unsigned countTrailingZeros() const
Count the number of trailing zero bits.
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
static bool isAdvSIMDModImmType7(uint64_t Imm)
static SDValue tryFormConcatFromShuffle(SDValue Op, SelectionDAG &DAG)
static bool isTRNMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
bool hasOneUse() const
Return true if there is exactly one use of this node.
bool isBeforeLegalize() const
cl::opt< bool > EnableAArch64ELFLocalDynamicTLSGeneration("aarch64-elf-ldtls-generation", cl::Hidden, cl::desc("Allow AArch64 Local Dynamic TLS code generation"), cl::init(false))
int getVarArgsGPRIndex() const
A description of a memory reference used in the backend.
APInt getLoBits(unsigned numBits) const
Compute an APInt containing numBits lowbits from this APInt.
static SDValue performTBZCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
static SDValue performCONDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG, unsigned CCIndex, unsigned CmpIndex)
bool isTargetDarwin() const
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static void ReplaceCMP_SWAP_128Results(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
static SDValue tryCombineCRC32(unsigned Mask, SDNode *N, SelectionDAG &DAG)
static Value * UseTlsOffset(IRBuilder<> &IRB, unsigned Offset)
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
bool isFloatingPointTy() const
Return true if this is one of the six floating-point types.
Shift and rotation operations.
static SDValue performAddSubLongCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Class to represent struct types.
LLVMContext & getContext() const
Get the global data context.
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth...
A Use represents the edge between a Value definition and its users.
static SDValue performFpToIntCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
Fold a floating-point multiply by power of two into floating-point to fixed-point conversion...
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
BinOp getOperation() const
static uint8_t encodeAdvSIMDModImmType6(uint64_t Imm)
CallLoweringInfo & setChain(SDValue InChain)
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
CopyToReg - This node has three operands: a chain, a register number to set to this value...
bool isIntegerTy() const
True if this is an instance of IntegerType.
op_iterator op_end() const
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
uint64_t getConstantOperandVal(unsigned i) const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
Value * getSSPStackGuardCheck(const Module &M) const override
If the target has a standard stack protection check function that performs validation and error handl...
static SDValue tryAdvSIMDModImm32(unsigned NewOp, SDValue Op, SelectionDAG &DAG, const APInt &Bits, const SDValue *LHS=nullptr)
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ISD::SETCC ValueType.
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
Helper structure to keep track of a SET_CC lowered into AArch64 code.
FLT_ROUNDS_ - Returns current rounding mode: -1 Undefined, 0 Round to 0, 1 Round to nearest, 2 Round to +inf, 3 Round to -inf.
virtual bool useLoadStackGuardNode() const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
This file contains the simple types necessary to represent the attributes associated with functions a...
unsigned getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isInConsecutiveRegs() const
MODereferenceable - The memory access is dereferenceable (i.e., doesn't trap).
static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG)
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
static bool isLogicalImmediate(uint64_t imm, unsigned regSize)
isLogicalImmediate - Return true if the immediate is valid for a logical immediate instruction of the...
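As a hedged example of what the predicate accepts (from AArch64AddressingModes.h): a repeating pattern of rotated contiguous ones encodes, while an arbitrary constant typically does not:
  bool Yes = AArch64_AM::isLogicalImmediate(0x00FF00FF00FF00FFULL, 64); // true
  bool No  = AArch64_AM::isLogicalImmediate(0x1234567812345678ULL, 64); // false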
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
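A sketch of how a TargetLowering constructor typically uses this; the actions below are illustrative, not the actual AArch64 tables:
  setOperationAction(ISD::SDIV, MVT::v4i32, Expand); // no vector divide; expand it
  setOperationAction(ISD::SETCC, MVT::f128, Custom); // route through a Lower* hook
  setOperationAction(ISD::FADD, MVT::v2f64, Legal);  // maps directly to an instruction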
bool isTargetMachO() const
static SDValue combineAcrossLanesIntrinsic(unsigned Opc, SDNode *N, SelectionDAG &DAG)
const DataLayout & getDataLayout() const
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
This class is used to represent EVT's, which are used to parameterize some operations.
bool isLegalInterleavedAccessType(VectorType *VecTy, const DataLayout &DL) const
Returns true if VecTy is a legal interleaved access type.
APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
Turn vector tests of the signbit in the form of: xor (sra X, elt_size(X)-1), -1 into: cmge X...
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
const BlockAddress * getBlockAddress() const
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE R Default(T Value)
This is an SDNode representing atomic operations.
uint64_t getNumElements() const
LocInfo getLocInfo() const
static bool isAdvSIMDModImmType5(uint64_t Imm)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
static StructType * get(LLVMContext &Context, ArrayRef< Type *> Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
This file implements a class to represent arbitrary precision integral constant values and operations...
bool is64BitVector() const
Return true if this is a 64-bit vector type.
static SDValue emitConditionalComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue CCOp, AArch64CC::CondCode Predicate, AArch64CC::CondCode OutCC, const SDLoc &DL, SelectionDAG &DAG)
can be transformed to: not (and (not (and (setCC (cmp C)) (setCD (cmp D)))) (and (not (setCA (cmp A))...
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
AtomicOrdering
Atomic ordering for LLVM's memory model.
STACKSAVE - STACKSAVE has one operand, an input chain.
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
unsigned getSizeInBits() const
Value * getSafeStackPointerLocation(IRBuilder<> &IRB) const override
If the target has a standard location for the unsafe stack pointer, returns the address of that locat...
static const unsigned PerfectShuffleTable[6561+1]
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
int64_t getSExtValue() const
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Fast - This calling convention attempts to make calls as fast as possible (e.g. by passing things in registers).
unsigned getScalarSizeInBits() const
void setArgumentStackToRestore(unsigned bytes)
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
unsigned getNextStackOffset() const
getNextStackOffset - Return the next stack offset such that all stack slots satisfy their alignment r...
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
int64_t getSExtValue() const
Get sign extended value.
Constant * createSequentialMask(IRBuilder<> &Builder, unsigned Start, unsigned NumInts, unsigned NumUndefs)
Create a sequential shuffle mask.
unsigned NoNaNsFPMath
NoNaNsFPMath - This flag is enabled when the -enable-no-nans-fp-math flag is specified on the command...
AtomicOrdering getOrdering() const
Return the atomic ordering requirements for this memory operation.
static SDValue performConcatVectorsCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
Type * getType() const
All values are typed, get the type of this value.
CATCHPAD - Represents a catchpad instruction.
MachineFunction & getMachineFunction() const
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
static bool isEXTMask(ArrayRef< int > M, EVT VT, bool &ReverseEXT, unsigned &Imm)
void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override
static bool checkValueWidth(SDValue V, unsigned width, ISD::LoadExtType &ExtType)
void toString(SmallVectorImpl< char > &Str, unsigned FormatPrecision=0, unsigned FormatMaxPadding=3, bool TruncateZero=true) const
MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the sy...
SDValue getRegisterMask(const uint32_t *RegMask)
constexpr bool isMask_64(uint64_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit wit...
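Since the predicate is constexpr, its behavior can be checked at compile time:
  static_assert(isMask_64(0x00000000000000FFULL), "ones starting at bit 0");
  static_assert(!isMask_64(0x0000000000000FF0ULL), "run must start at bit 0");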
const AArch64RegisterInfo * getRegisterInfo() const override
static SDValue tryAdvSIMDModImm8(unsigned NewOp, SDValue Op, SelectionDAG &DAG, const APInt &Bits)
const TargetMachine & getTarget() const
BasicBlock * GetInsertBlock() const
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
Simple integer binary arithmetic operators.
static bool isOverflowIntrOpRes(SDValue Op)
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
const APInt & getValue() const
Return the constant as an APInt value reference.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool enableAggressiveFMAFusion(EVT VT) const override
Enable aggressive FMA fusion on targets that want it.
static bool isAdvSIMDModImmType2(uint64_t Imm)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
AttributeList getAttributes() const
Return the attribute list for this Function.
An instruction for storing to memory.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
op_iterator op_begin() const
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification, or lowering of the constant.
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="")
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
bool isFPImmLegal(const APFloat &Imm, EVT VT) const override
Returns true if the target can instruction select the specified FP immediate natively.
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements. ...
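A minimal sketch, assuming DAG and DL are in scope:
  SDValue One = DAG.getConstantFP(1.0, DL, MVT::f32);
  SDValue Vec = DAG.getSplatBuildVector(MVT::v4f32, DL, One); // <1.0, 1.0, 1.0, 1.0>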
ArrayRef< SDUse > ops() const
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass...
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
static SDValue getReductionSDNode(unsigned Op, SDLoc DL, SDValue ScalarOp, SelectionDAG &DAG)
static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG)
static const MCPhysReg GPRArgRegs[]
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
bool isXRegisterReserved(size_t i) const
Value * CreateZExtOrBitCast(Value *V, Type *DestTy, const Twine &Name="")
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Function * getDeclaration(Module *M, ID id, ArrayRef< Type *> Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void initializeSplitCSR(MachineBasicBlock *Entry) const override
Perform necessary initialization to handle a subset of CSRs explicitly via copies.
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override
For some targets, an LLVM struct type must be broken down into multiple simple types, but the calling convention specifies that the entire struct must be passed in a block of consecutive registers.
bool supportsAddressTopByteIgnored() const
CPU has TBI (top byte of addresses is ignored during HW address translation) and OS enables it...
static SDValue WidenVector(SDValue V64Reg, SelectionDAG &DAG)
WidenVector - Given a value in the V64 register class, produce the equivalent value in the V128 regis...
MVT getVectorElementType() const
Value * getOperand(unsigned i) const
std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
static bool isREVMask(ArrayRef< int > M, EVT VT, unsigned BlockSize)
isREVMask - Check if a vector shuffle corresponds to a REV instruction with the specified blocksize...
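For illustration, masks that would satisfy this check for MVT::v8i8 (element indices reversed within each block):
  int Rev64[] = {7, 6, 5, 4, 3, 2, 1, 0}; // isREVMask(Rev64, MVT::v8i8, 64)
  int Rev32[] = {3, 2, 1, 0, 7, 6, 5, 4}; // isREVMask(Rev32, MVT::v8i8, 32)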
static SDValue tryExtendDUPToExtractHigh(SDValue N, SelectionDAG &DAG)
Class to represent pointers.
unsigned getByValSize() const
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
static bool isEssentiallyExtractSubvector(SDValue N)
This class is used to represent ISD::STORE nodes.
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
static SDValue performBRCONDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
static SDValue emitConjunction(SelectionDAG &DAG, SDValue Val, AArch64CC::CondCode &OutCC)
Emit expression as a conjunction (a series of CCMP/CFCMP ops).
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Flag
These should be considered private to the implementation of the MCInstrDesc class.
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the specified, possibly variable...
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
TargetInstrInfo - Interface to description of machine instruction set.
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
bool is128BitVector() const
Return true if this is a 128-bit vector type.
bool isTargetWindows() const
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Get a value with high bits set.
bool isLegalICmpImmediate(int64_t) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
static bool isAdvSIMDModImmType9(uint64_t Imm)
bool isShuffleMaskLegal(ArrayRef< int > M, EVT VT) const override
Return true if the given shuffle mask can be codegen'd directly, or if it should be stack expanded...
bool isZero() const
Return true if the value is positive or negative zero.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
bool isOSWindows() const
Tests whether the OS is Windows.
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
MOVolatile - The memory access is volatile.
virtual Value * getIRStackGuard(IRBuilder<> &IRB) const
If the target has a standard location for the stack protector guard, returns the address of that loca...
constexpr uint64_t MinAlign(uint64_t A, uint64_t B)
A and B are either alignments or offsets.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space...
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
static SDValue tryCombineToBSL(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
const SDValue & getBasePtr() const
A switch()-like statement whose cases are string literals.
initializer< Ty > init(const Ty &Val)
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
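A sketch assuming V1 and V2 are v4i32 values and DL is in scope; the mask below interleaves the low halves, the pattern a ZIP1 instruction implements:
  int ZipMask[] = {0, 4, 1, 5};
  SDValue Zip = DAG.getVectorShuffle(MVT::v4i32, DL, V1, V2, ZipMask);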
int64_t getOffset() const
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
void addLiveIn(MCPhysReg PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
unsigned UnsafeFPMath
UnsafeFPMath - This flag is enabled when the -enable-unsafe-fp-math flag is specified on the command ...
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fuse-fp-ops=xxx option.
bool isDesirableToCommuteWithShift(const SDNode *N, CombineLevel Level) const override
Returns false if N is a bit extraction pattern of (X >> C) & Mask.
Control flow instructions. These all have token chains.
MO_G1 - A symbol operand with this flag (granule 1) represents the bits 16-31 of a 64-bit address...
CodeGenOpt::Level getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
bool requiresStrictAlign() const
const MachineRegisterInfo * MRI
std::size_t countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1...
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
static int getFP32Imm(const APInt &Imm)
getFP32Imm - Return an 8-bit floating-point version of the 32-bit floating-point value.
unsigned countPopulation() const
Count the number of bits set.
bool isZExtFree(Type *Ty1, Type *Ty2) const override
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
Value * concatenateVectors(IRBuilder<> &Builder, ArrayRef< Value *> Vecs)
Concatenate a list of vectors.
LLVM Basic Block Representation.
The instances of the Type class are immutable: once they are created, they are never changed...
This is an important class for using LLVM in a threaded context.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
Simple binary floating point operators.
void setTargetDAGCombine(ISD::NodeType NT)
Targets should invoke this method for each target independent node that they want to provide a custom...
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
MachineBasicBlock * EmitLoweredCatchPad(MachineInstr &MI, MachineBasicBlock *BB) const
size_t size() const
size - Get the array size.
void incNumLocalDynamicTLSAccesses()
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE...
iterator_range< value_op_iterator > op_values() const
const SDValue & getOperand(unsigned Num) const
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL...
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
unsigned getVarArgsGPRSize() const
bool isPointerTy() const
True if this is an instance of PointerType.
static const MVT MVT_CC
Value type used for condition codes.
Carry-using nodes for multiple precision addition and subtraction.
static SDValue tryCombineLongOpWithDup(unsigned IID, SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
unsigned getVarArgsFPRSize() const
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
unsigned getPrefFunctionAlignment() const
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
bool isAcquireOrStronger(AtomicOrdering ao)
std::pair< SDValue, SDValue > SplitVectorOperand(const SDNode *N, unsigned OpNo)
Split the node's operand with EXTRACT_SUBVECTOR and return the low/high part.
CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC) const
Selects the correct CCAssignFn for a given CallingConvention value.
Libcall getFPTOSINT(EVT OpVT, EVT RetVT)
getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
static bool isSetCCOrZExtSetCC(const SDValue &Op, SetCCInfoAndKind &Info)
static EVT getExtensionTo64Bits(const EVT &OrigVT)
static std::pair< SDValue, SDValue > splitInt128(SDValue N, SelectionDAG &DAG)
static bool isAdvSIMDModImmType1(uint64_t Imm)
static mvt_range fp_valuetypes()
static void ReplaceReductionResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, unsigned InterOp, unsigned AcrossOp)
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
static Type * getVoidTy(LLVMContext &C)
This class provides iterator support for SDUse operands that use a specific SDNode.
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself...
bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side...
static unsigned getCmpOperandFoldingProfit(SDValue Op)
Returns how profitable it is to fold a comparison's operand's shift and/or extension operations...
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
static bool mayTailCallThisCC(CallingConv::ID CC)
Return true if we might ever do TCO for calls with this calling convention.
void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
Convenience method to set an operation to Promote and specify the type in a single call...
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
bool CombineTo(SDValue O, SDValue N)
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
void computeMaxCallFrameSize(const MachineFunction &MF)
Computes the maximum size of a callframe and the AdjustsStack property.
const uint32_t * getThisReturnPreservedMask(const MachineFunction &MF, CallingConv::ID) const
getThisReturnPreservedMask - Returns a call preserved mask specific to the case that 'returned' is on...
TRAP - Trapping instruction.
const APInt & getAPIntValue() const
std::string getEVTString() const
This function returns value type as a string, e.g. "i32".
unsigned GuaranteedTailCallOpt
GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the commandline...
const Triple & getTargetTriple() const
Value * getPointerOperand()
static bool isAdvSIMDModImmType8(uint64_t Imm)
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
void setPrefFunctionAlignment(unsigned Align)
Set the target's preferred function alignment.
static bool resultsCompatible(CallingConv::ID CalleeCC, CallingConv::ID CallerCC, MachineFunction &MF, LLVMContext &C, const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn CalleeFn, CCAssignFn CallerFn)
Returns true if the results of the two calling conventions are compatible.
static mvt_range vector_valuetypes()
self_iterator getIterator()
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align=0, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, unsigned Size=0)
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
MONonTemporal - The memory access is non-temporal.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
const MCPhysReg * getCalleeSavedRegsViaCopy(const MachineFunction *MF) const
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y)...
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
void setVarArgsGPRIndex(int Index)
SmallVectorImpl< ForwardedRegister > & getForwardedMustTailRegParms()
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
static void changeFPCCToAArch64CC(ISD::CondCode CC, AArch64CC::CondCode &CondCode, AArch64CC::CondCode &CondCode2)
changeFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 CC.
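A hedged usage sketch: most FP condition codes map to one AArch64 condition, but a few (e.g. SETONE, SETUEQ) require two, returned through the second out-parameter:
  AArch64CC::CondCode CondCode, CondCode2;
  changeFPCCToAArch64CC(ISD::SETOEQ, CondCode, CondCode2); // CondCode == AArch64CC::EQ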
auto find_if(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range))
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly...
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all...
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
unsigned MaxStoresPerMemmove
Specify maximum bytes of store instructions per memmove call.
bool isOSBinFormatMachO() const
Tests whether the environment is MachO.
Helper structure to be able to read SetCC information.
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo...
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
static SDValue addRequiredExtensionForVectorMULL(SDValue N, SelectionDAG &DAG, const EVT &OrigTy, const EVT &ExtTy, unsigned ExtOpcode)
Type * getIndexedType() const
std::vector< ArgListEntry > ArgListTy
MO_HI12 - This flag indicates that a symbol operand represents the bits 13-24 of a 64-bit address...
virtual Value * getSafeStackPointerLocation(IRBuilder<> &IRB) const
Returns the target-specific address of the unsafe stack pointer.
unsigned getAlignment() const
bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
static SDValue performIntToFpCombine(SDNode *N, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
static bool isSingletonEXTMask(ArrayRef< int > M, EVT VT, unsigned &Imm)
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
This structure contains all information that is necessary for lowering calls.
auto find(R &&Range, const T &Val) -> decltype(adl_begin(Range))
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly...
bool isBeforeLegalizeOps() const
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
static SDValue GenerateTBL(SDValue Op, ArrayRef< int > ShuffleMask, SelectionDAG &DAG)
static SDValue LowerTruncateVectorStore(SDLoc DL, StoreSDNode *ST, EVT VT, EVT MemVT, SelectionDAG &DAG)
const TargetMachine & getTargetMachine() const
This class contains a discriminated union of information about pointers in memory operands...
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode...
MO_TLS - Indicates that the operand being accessed is some kind of thread-local symbol.
unsigned getNumOperands() const
Return the number of values used by this operation.
static SDValue LowerXOR(SDValue Op, SelectionDAG &DAG)
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
static uint8_t encodeAdvSIMDModImmType12(uint64_t Imm)
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="")
static SDValue performSTORECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG)
NarrowVector - Given a value in the V128 register class, produce the equivalent value in the V64 regi...
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands...
Triple - Helper class for working with autoconf configuration names.
static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount, bool &FromHi)
An EXTR instruction is made up of two shifts, ORed together.
MOStore - The memory access writes data.
std::enable_if< std::numeric_limits< T >::is_signed, bool >::type getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
void setHasExtractBitsInsn(bool hasExtractInsn=true)
Tells the code generator that the target has BitExtract instructions.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass...
static cl::opt< bool > EnableAArch64SlrGeneration("aarch64-shift-insert-generation", cl::Hidden, cl::desc("Allow AArch64 SLI/SRI formation"), cl::init(false))
bool isReleaseOrStronger(AtomicOrdering ao)
static uint8_t encodeAdvSIMDModImmType2(uint64_t Imm)
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type...
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
Libcall getFPEXT(EVT OpVT, EVT RetVT)
getFPEXT - Return the FPEXT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
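A sketch pairing the encoder with the validity check above (assumes AArch64AddressingModes.h):
  uint64_t Imm = 0xFF00FF00FF00FF00ULL;
  assert(AArch64_AM::isLogicalImmediate(Imm, 64) && "immediate must be encodable");
  uint64_t Enc = AArch64_AM::encodeLogicalImmediate(Imm, 64);
  // Enc packs the N:immr:imms fields consumed by ANDXri/ORRXri/EORXri.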
SDValue getTargetConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offset=0, unsigned char TargetFlags=0)
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned char TargetFlags=0)
TokenFactor - This node takes multiple tokens as input and produces a single token result...
static int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
bool isBuildVectorAllOnes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are ~0 or undef...
void dump() const
Dump this node, for debugging.
static SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG)
const TargetLowering & getTargetLoweringInfo() const
bool lowerInterleavedLoad(LoadInst *LI, ArrayRef< ShuffleVectorInst *> Shuffles, ArrayRef< unsigned > Indices, unsigned Factor) const override
Lower an interleaved load into a ldN intrinsic.
Iterator for intrusive lists based on ilist_node.
bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT) const override
Return true if it is profitable to reduce a load to a smaller type.
CCState - This class holds information needed while lowering arguments and return values...
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
unsigned getBytesInStackArgArea() const
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT) const
Return true if it is profitable to reduce a load to a smaller type.
static unsigned getIntrinsicID(const SDNode *N)
This is the shared class of boolean and integer constants.
static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V)
static SDValue splitStoreSplat(SelectionDAG &DAG, StoreSDNode &St, SDValue SplatVal, unsigned NumVecElts)
EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc, MachineFunction &MF) const override
Returns the target specific optimal type for load and store operations as a result of memset...
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
EVT getVectorElementType() const
Given a vector type, return the type of each element.
static SDValue tryAdvSIMDModImm321s(unsigned NewOp, SDValue Op, SelectionDAG &DAG, const APInt &Bits)
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These methods get and set the calling convention of this function...
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override
EVT is not used in-tree, but is used by out-of-tree targets.
static bool isCMN(SDValue Op, ISD::CondCode CC)
AArch64TargetLowering(const TargetMachine &TM, const AArch64Subtarget &STI)
unsigned getMaximumJumpTableSize() const
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Module.h This file contains the declarations for the Module class.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
static void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
unsigned getMaximumJumpTableSize() const
Return upper limit for number of entries in a jump table.
static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign, unsigned AlignCheck)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
Provides information about what library functions are available for the current target.
static bool isLegalArithImmed(uint64_t C)
CCValAssign - Represent assignment of one arg/retval to a location.
static unsigned getExtFactor(SDValue &V)
getExtFactor - Determine the adjustment factor for the position when generating an "extract from vect...
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
constexpr size_t array_lengthof(T(&)[N])
Find the length of an array.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
BRCOND - Conditional branch.
An SDNode that represents everything that will be needed to construct a MachineInstr.
static bool isAllConstantBuildVector(const SDValue &PotentialBVec, uint64_t &ConstVal)
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
Byte Swap and Counting operators.
This is an abstract virtual class for memory operations.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
const Constant * getConstVal() const
bool isCalledByLegalizer() const
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
static SDValue tryCombineShiftImm(unsigned IID, SDNode *N, SelectionDAG &DAG)
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Helper structure to keep track of ISD::SET_CC operands.
bool hasMustTailInVarArgFunc() const
Returns true if the function is variadic and contains a musttail call.
Represents one node in the SelectionDAG.
bool isProfitableToHoist(Instruction *I) const override
Check if it is profitable to hoist instruction in then/else to if.
CondCode getSetCCInverse(CondCode Operation, bool isInteger)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
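For example, the inverse of an ordered-equal FP comparison is unordered-or-not-equal:
  ISD::CondCode Inv = ISD::getSetCCInverse(ISD::SETOEQ, /*isInteger=*/false);
  // Inv == ISD::SETUNE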
static SDValue tryAdvSIMDModImmFP(unsigned NewOp, SDValue Op, SelectionDAG &DAG, const APInt &Bits)
void setAdjustsStack(bool V)
static bool isSignExtended(SDNode *N, SelectionDAG &DAG)
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded, TargetLoweringOpt &TLO) const override
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
const Function & getFunction() const
Return the LLVM function that this machine code represents.
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const override
Lower an interleaved store into a stN intrinsic.
static void changeFPCCToANDAArch64CC(ISD::CondCode CC, AArch64CC::CondCode &CondCode, AArch64CC::CondCode &CondCode2)
Convert a DAG fp condition code to an AArch64 CC.
unsigned logBase2() const
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static mvt_range integer_valuetypes()
static SDValue performFDivCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
Fold a floating-point divide by power of two into fixed-point to floating-point conversion.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' flag.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
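Two illustrative calls:
#include "llvm/Support/MathExtras.h"
unsigned A = llvm::Log2_32(32); // 5: exact power of two
unsigned B = llvm::Log2_32(33); // 5: floor semantics round down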
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static bool isZIPMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
unsigned getVectorNumElements() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Class to represent vector types.
bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns true if the given (atomic) store should be expanded by the IR-level AtomicExpand pass into an...
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not have a module.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT...
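A sketch (assuming an LLVMContext &Ctx; simple cases like this one map onto an MVT):
llvm::EVT V2F64 = llvm::EVT::getVectorVT(Ctx, llvm::MVT::f64, 2); // same as MVT::v2f64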
void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate what to do about it.
EVT getMemoryVT() const
Return the type of the in-memory value.
static unsigned getNZCVToSatisfyCondCode(CondCode Code)
Given a condition code, return NZCV flags that would satisfy that condition.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
static bool performTBISimplification(SDValue Addr, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
Simplify Addr given that the top byte of it is ignored by HW during address translation.
Class for arbitrary precision integers.
unsigned getByValAlign() const
CodeModel::Model getCodeModel() const
Returns the code model.
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
Value * getIRStackGuard(IRBuilder<> &IRB) const override
If the target has a standard location for the stack protector cookie, returns the address of that location.
iterator_range< use_iterator > uses()
A "pseudo-class" with methods for operating on BUILD_VECTORs.
Select(COND, TRUEVAL, FALSEVAL).
void setMinFunctionAlignment(unsigned Align)
Set the target's minimum function alignment (in log2(bytes))
static void changeVectorFPCCToAArch64CC(ISD::CondCode CC, AArch64CC::CondCode &CondCode, AArch64CC::CondCode &CondCode2, bool &Invert)
changeVectorFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 CC usable with the vector instructions.
bool bitsLT(EVT VT) const
Return true if this has fewer bits than VT.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
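For example:
#include "llvm/ADT/APInt.h"
bool P = llvm::APInt(32, 16).isPowerOf2(); // true
bool Z = llvm::APInt(32, 0).isPowerOf2();  // false: zero is excluded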
static uint8_t encodeAdvSIMDModImmType10(uint64_t Imm)
static use_iterator use_end()
bool isNullFPConstant(SDValue V)
Returns true if V is an FP constant with a value of positive zero.
void setPrefLoopAlignment(unsigned Align)
Set the target's preferred loop alignment.
void setMaximumJumpTableSize(unsigned)
Indicate the maximum number of entries in jump tables.
ZERO_EXTEND - Used for integer types, zeroing the new bits.
void UpdateCustomCallPreservedMask(MachineFunction &MF, const uint32_t **Mask) const
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed values into this state.
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
ANY_EXTEND - Used for integer types. The high bits are undefined.
static SDValue performNVCASTCombine(SDNode *N)
Get rid of unnecessary NVCASTs (that don't change the type).
static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
static SDValue tryAdvSIMDModImm64(unsigned NewOp, SDValue Op, SelectionDAG &DAG, const APInt &Bits)
LLVM_ATTRIBUTE_ALWAYS_INLINE StringSwitch & Case(StringLiteral S, T Value)
iterator insert(iterator I, T &&Elt)
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
const uint32_t * getWindowsStackProbePreservedMask() const
Stack probing calls preserve different CSRs to the normal CC.
static SDValue skipExtensionForVectorMULL(SDNode *N, SelectionDAG &DAG)
virtual Value * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handling, returns the function.
CATCHRET - Represents a return from a catch block funclet.
static SDValue performPostLD1Combine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, bool IsLaneOp)
Target-specific DAG combine function for post-increment LD1 (lane) and post-increment LD1R.
Flags
Flags values. These may be or'd together.
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
static uint8_t encodeAdvSIMDModImmType5(uint64_t Imm)
uint64_t getTypeSizeInBits(Type *Ty) const
Returns the number of bits necessary to hold the specified type.
static bool canEmitConjunction(const SDValue Val, bool &CanNegate, bool &MustBeFirst, bool WillNegate, unsigned Depth=0)
Returns true if Val is a tree of AND/OR/SETCC operations that can be expressed as a conjunction.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
static bool isEquivalentMaskless(unsigned CC, unsigned width, ISD::LoadExtType ExtType, int AddConstant, int CompConstant)
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const
Selects the correct CCAssignFn for a given CallingConvention value.
Value * CreateTruncOrBitCast(Value *V, Type *DestTy, const Twine &Name="")
uint64_t getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment padding.
BR_JT - Jumptable branch.
static mvt_range all_valuetypes()
SimpleValueType Iteration.
Representation of each machine instruction.
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
void UpdateCustomCalleeSavedRegs(MachineFunction &MF) const
static bool isAdvSIMDModImmType10(uint64_t Imm)
#define FALKOR_STRIDED_ACCESS_MD
void emitReservedArgRegCallError(const MachineFunction &MF) const
static uint8_t encodeAdvSIMDModImmType8(uint64_t Imm)
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
These are IR-level optimization flags that may be propagated to SDNodes.
Libcall getFPTOUINT(EVT OpVT, EVT RetVT)
getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned char TargetFlags=0)
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator end()
bool is64BitVector() const
Return true if this is a 64-bit vector type.
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit version).
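Concretely:
#include "llvm/Support/MathExtras.h"
bool A = llvm::isShiftedMask_64(0x00FF0000ULL); // true: one contiguous run of ones
bool B = llvm::isShiftedMask_64(0x00F000F0ULL); // false: two separate runs
bool C = llvm::isShiftedMask_64(0);             // false: the run must be non-empty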
unsigned getNumArgOperands() const
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed without sret-demotion and false otherwise.
bool isVector() const
Return true if this is a vector value type.
MachineBasicBlock * EmitF128CSEL(MachineInstr &MI, MachineBasicBlock *BB) const
static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG)
Constant * getOrInsertGlobal(StringRef Name, Type *Ty, function_ref< GlobalVariable *()> CreateGlobalCallback)
Look up the specified global in the module symbol table.
Bitwise operators - logical and, logical or, logical xor.
pointer data()
Return a pointer to the vector's buffer, even if empty().
int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, uint32_t BitWidth) const
If this is a constant FP splat and the splatted constant FP is an exact power of 2, return the log base 2 integer value.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before 'Where'.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, bool isSigned, const SDLoc &dl, bool doesNotReturn=false, bool isReturnValueUsed=true) const
Returns a pair of (return value, chain).
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
unsigned getNumInterleavedAccesses(VectorType *VecTy, const DataLayout &DL) const
Returns the number of interleaved accesses that will be generated when lowering accesses of the given type.
unsigned getLocMemOffset() const
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of the specified type.
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo) const override
This method returns a target specific FastISel object, or null if the target does not support "fast" instruction selection.
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2*N], and return the full value as two results, each of type iN.
static uint8_t encodeAdvSIMDModImmType4(uint64_t Imm)
MO_COFFSTUB - On a symbol operand "FOO", this indicates that the reference is actually to the ".refptr.FOO" symbol.
LLVM_NODISCARD bool empty() const
bool is128BitVector() const
Return true if this is a 128-bit vector type.
MO_DLLIMPORT - On a symbol operand, this represents that the reference to the symbol is for an import stub.
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in a large integer register.
void insertSSPDeclarations(Module &M) const override
Inserts necessary declarations for SSP (stack protection) purpose.
static uint8_t encodeAdvSIMDModImmType11(uint64_t Imm)
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store instruction.
PointerUnion< const Value *, const PseudoSourceValue * > ptrVal
bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override
Return true if the target supports combining a chain of an AND with a constant mask followed by a compare against zero into a single test-style instruction.
const Function * getParent() const
Return the enclosing method, or null if none.
static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG)
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values into this state.
const TargetSubtargetInfo & getSubtarget() const
FunctionLoweringInfo - This contains information that is global to a function that is used when lowering a region of the function.
Flags getFlags() const
Return the raw flags of the source value.
unsigned MaxGluedStoresPerMemcpy
Specify max number of store instructions to glue in inlined memcpy.
APFloat abs(APFloat X)
Returns the absolute value of the argument.
bool optForMinSize() const
Optimize this function for minimum size (-Oz).
MO_PAGEOFF - A symbol operand with this flag represents the offset of that symbol within a 4K page.
The memory access always returns the same value (or traps).
unsigned MaxStoresPerMemmoveOptSize
Maximum number of store instructions that may be substituted for a call to memmove, used for functions with OptSize attribute.
unsigned MaxStoresPerMemcpyOptSize
Maximum number of store operations that may be substituted for a call to memcpy, used for functions with OptSize attribute.
Value * emitLoadLinked(IRBuilder<> &Builder, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type.
void setStackPointerRegisterToSaveRestore(unsigned R)
If set to a physical register, this specifies the register that llvm.stacksave/llvm.stackrestore should save and restore.
static SDValue getTestBitOperand(SDValue Op, unsigned &Bit, bool &Invert, SelectionDAG &DAG)
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, int64_t &Cnt)
isVShiftRImm - Check if this is a valid build_vector for the immediate operand of a vector shift right operation.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
static bool optimizeLogicalImm(SDValue Op, unsigned Size, uint64_t Imm, const APInt &Demanded, TargetLowering::TargetLoweringOpt &TLO, unsigned NewOpc)
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Type * getValueType() const
static SDValue performIntrinsicCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
static SDValue replaceSplatVectorStore(SelectionDAG &DAG, StoreSDNode &St)
Replace a splat of a scalar to a vector store by scalar stores of the scalar value.
static const int LAST_INDEXED_MODE
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value *> Args=None, const Twine &Name="", MDNode *FPMathTag=nullptr)
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool needsFixedCatchObjects() const override
Used for exception handling on Win64.
unsigned getOpcode() const
FSINCOS - Compute both fsin and fcos as a single operation.
SDValue getValue(unsigned R) const
bool useLoadStackGuardNode() const override
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
const uint32_t * getTLSCallPreservedMask() const
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nullptr.
unsigned MaxStoresPerMemcpy
Specify the maximum number of store instructions per memcpy call.
Value * getSDagStackGuard(const Module &M) const override
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nullptr.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with.
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
void setBytesInStackArgArea(unsigned bytes)
Libcall getSINTTOFP(EVT OpVT, EVT RetVT)
getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, const Value *PtrVal, unsigned Alignment, AtomicOrdering Ordering, SyncScope::ID SSID)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
static SDValue replaceZeroVectorStore(SelectionDAG &DAG, StoreSDNode &St)
Replace a splat of zeros to a vector store by scalar stores of WZR/XZR.
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
const MachinePointerInfo & getPointerInfo() const
bool isLegalAddImmediate(int64_t) const override
Return true if the specified immediate is a legal add immediate, that is, the target has add instructions which can add a register with the immediate without having to materialize the immediate into a register.
ConstantSDNode * getConstantSplatNode(BitVector *UndefElements=nullptr) const
Returns the splatted constant or null if this is not a constant splat.
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length and element type, this produces a concatenated vector result value.
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
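As a hypothetical query in the spirit of the TBI simplification listed earlier (assuming DAG and an i64 SDValue Addr are in scope):
// True when bits 63:56 of Addr are provably zero, so clearing the top byte is a no-op.
llvm::APInt TopByte = llvm::APInt::getHighBitsSet(/*numBits=*/64, /*hiBitsSet=*/8);
bool TopByteClear = DAG.MaskedValueIsZero(Addr, TopByte);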
bool EnableExtLdPromotion
void insert(iterator MBBI, MachineBasicBlock *MBB)
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
LegalizeTypeAction
This enum indicates whether a type is legal for a target, and if not, what action should be used to make it valid.
static AArch64CC::CondCode changeIntCCToAArch64CC(ISD::CondCode CC)
changeIntCCToAArch64CC - Convert a DAG integer condition code to an AArch64 CC
MachineMemOperand::Flags getMMOFlags(const Instruction &I) const override
This callback is used to inspect load/store instructions and add target-specific MachineMemOperand flags.
static bool isSetCC(SDValue Op, SetCCInfoAndKind &SetCCInfo)
Check whether or not Op is a SET_CC operation, either a generic or an AArch64 lowered one.
void setReturnAddressIsTaken(bool s)
unsigned getVaListSizeInBits(const DataLayout &DL) const override
Returns the size of the platform's va_list object.
bool hasBasePointer(const MachineFunction &MF) const
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
unsigned getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
ArrayRef< int > getMask() const
Module * getParent()
Get the module that this global value is contained inside of...
LLVM Value Representation.
Constant * getPersonalityFn() const
Get the personality function associated with this function.
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
FMA - Perform a * b + c with no intermediate rounding step.
SDValue getRegister(unsigned Reg, EVT VT)
unsigned getResNo() const
Get the index which selects a specific result in the SDNode.
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncating it.
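A sketch (assuming DAG, DL, and an i32 SDValue V32 are in scope):
// Widening selects ISD::ANY_EXTEND; narrowing would select ISD::TRUNCATE.
llvm::SDValue V64 = DAG.getAnyExtOrTrunc(V32, DL, llvm::MVT::i64);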
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
virtual void finalizeLowering(MachineFunction &MF) const
Execute target specific actions to finalize target lowering.
const AArch64InstrInfo * getInstrInfo() const override
static void ReplaceBITCASTResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG)
static VectorType * get(Type *ElementType, unsigned NumElements)
This static method is the primary way to construct a VectorType.
virtual void insertSSPDeclarations(Module &M) const
Inserts necessary declarations for SSP (stack protection) purpose.
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
SDValue getValueType(EVT)
Value * emitStoreConditional(IRBuilder<> &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt)
getVShiftImm - Check if this is a valid build_vector for the immediate operand of a vector shift operation.
PREFETCH - This corresponds to a prefetch intrinsic.
static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, bool isSigned)
bool isUndef() const
Return true if the type of the node is undefined.
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
unsigned getMaxSupportedInterleaveFactor() const override
Get the maximum supported factor for interleaved memory accesses.
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
Primary interface to the complete machine description for the target machine.
Type * getElementType() const
int getVarArgsFPRIndex() const
bool hasOneUse() const
Return true if there is exactly one user of this value.
static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
MO_NC - Indicates whether the linker is expected to check the symbol reference for overflow.
SetCC operator - This evaluates to a true value iff the condition is true.
SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
static uint8_t encodeAdvSIMDModImmType9(uint64_t Imm)
unsigned MaxStoresPerMemsetOptSize
Maximum number of store operations that may be substituted for the call to memset, used for functions with OptSize attribute.
static bool isConcatMask(ArrayRef< int > Mask, EVT VT, bool SplitLHS)
static bool isAdvSIMDModImmType11(uint64_t Imm)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
bool hasPairedLoad(EVT LoadedType, unsigned &RequiredAligment) const override
Return true if the target supplies and combines to a paired load two loaded values of type LoadedType next to each other in memory.
bool operator==(uint64_t V1, const APInt &V2)
unsigned getNumOperands() const
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual results.
static bool isVolatile(Instruction *Inst)
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
Provide custom lowering hooks for some operations.
unsigned char classifyGlobalFunctionReference(const GlobalValue *GV, const TargetMachine &TM) const
const SDValue & getOperand(unsigned i) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
unsigned getLocReg() const
static SDValue performGlobalAddressCombine(SDNode *N, SelectionDAG &DAG, const AArch64Subtarget *Subtarget, const TargetMachine &TM)
uint64_t getZExtValue() const
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
TRUNCATE - Completely drop the high bits.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static void Split(std::vector< std::string > &V, StringRef S)
Splits a string of comma separated items into a vector of strings.
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
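For example:
#include "llvm/Support/MathExtras.h"
bool F1 = llvm::isUIntN(12, 4095); // true: fits in 12 unsigned bits
bool F2 = llvm::isUIntN(12, 4096); // false: needs a 13th bit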
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand *> NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const MachineOperand & getOperand(unsigned i) const
static SDValue emitConjunctionRec(SelectionDAG &DAG, SDValue Val, AArch64CC::CondCode &OutCC, bool Negate, SDValue CCOp, AArch64CC::CondCode Predicate)
Emit conjunction or disjunction tree with the CMP/FCMP followed by a chain of CCMP/CFCMP ops.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
Perform various unary floating-point operations inspired by libm.
static uint8_t encodeAdvSIMDModImmType7(uint64_t Imm)
VectorType * getType() const
Overload to return most specific vector type.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
unsigned getBaseRegister() const
Value * getPointerOperand()
static SDValue getEstimate(const AArch64Subtarget *ST, unsigned Opcode, SDValue Operand, SelectionDAG &DAG, int &ExtraSteps)
static SDValue performSRLCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static cl::opt< bool > EnableOptimizeLogicalImm("aarch64-enable-logical-imm", cl::Hidden, cl::desc("Enable AArch64 logical imm instruction " "optimization"), cl::init(true))
const SDValue & getBasePtr() const
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
static const MachineMemOperand::Flags MOStridedAccess
LLVMContext * getContext() const
static SDValue tryAdvSIMDModImm16(unsigned NewOp, SDValue Op, SelectionDAG &DAG, const APInt &Bits, const SDValue *LHS=nullptr)
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the specified vector type.
static bool isSplatMask(const int *Mask, EVT VT)
static bool resolveBuildVector(BuildVectorSDNode *BVN, APInt &CnstBits, APInt &UndefBits)
Type * getElementType() const
bool PredictableSelectIsExpensive
Tells the code generator that select is more expensive than a branch if the branch is usually predicted right.
unsigned getPrefLoopAlignment() const
unsigned createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified register class.
bool empty() const
empty - Check if the array is empty.
static bool canGuaranteeTCO(CallingConv::ID CC)
Return true if the calling convention is one that we can guarantee TCO for.
bool isArrayTy() const
True if this is an instance of ArrayType.
void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO)
void insertCopiesSplitCSR(MachineBasicBlock *Entry, const SmallVectorImpl< MachineBasicBlock *> &Exits) const override
Insert explicit copies in entry and exit blocks.
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned char TargetFlags=0)
bool isFMAFasterThanFMulAndFAdd(EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
static SDValue tryLowerToSLI(SDNode *N, SelectionDAG &DAG)
void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate what to do about it.
gep_type_iterator gep_type_begin(const User *GEP)
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of type iN, representing the high half of the full 2N-bit product.
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that is recognized by PrologEpilogInserter.
This class is used to represent ISD::LOAD nodes.
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
void setIsSplitCSR(bool s)
static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue &AArch64cc, SelectionDAG &DAG, const SDLoc &dl)