115 using namespace llvm;
117 #define DEBUG_TYPE "arm-isel" 119 STATISTIC(NumTailCalls,
"Number of tail calls");
120 STATISTIC(NumMovwMovt,
"Number of GAs materialized with movw + movt");
121 STATISTIC(NumLoopByVals,
"Number of loops generated for byval arguments");
123 "Number of constants with their storage promoted into constant pools");
127 cl::desc(
"Enable / disable ARM interworking (for debugging only)"),
132 cl::desc(
"Enable / disable promotion of unnamed_addr constants into " 137 cl::desc(
"Maximum size of constant to promote into a constant pool"),
141 cl::desc(
"Maximum size of ALL constants to promote into a constant pool"),
146 ARM::R0, ARM::R1,
ARM::R2, ARM::R3
149 void ARMTargetLowering::addTypeForNEON(
MVT VT,
MVT PromotedLdStVT,
150 MVT PromotedBitwiseVT) {
151 if (VT != PromotedLdStVT) {
190 if (VT.
isInteger() && VT != PromotedBitwiseVT) {
213 void ARMTargetLowering::addDRTypeForNEON(
MVT VT) {
218 void ARMTargetLowering::addQRTypeForNEON(
MVT VT) {
235 for (
int LCID = 0; LCID < RTLIB::UNKNOWN_LIBCALL; ++LCID)
245 static const struct {
247 const char *
const Name;
252 { RTLIB::SUB_F32,
"__subsf3vfp", ISD::SETCC_INVALID },
253 { RTLIB::MUL_F32,
"__mulsf3vfp", ISD::SETCC_INVALID },
254 { RTLIB::DIV_F32,
"__divsf3vfp", ISD::SETCC_INVALID },
257 { RTLIB::ADD_F64,
"__adddf3vfp", ISD::SETCC_INVALID },
258 { RTLIB::SUB_F64,
"__subdf3vfp", ISD::SETCC_INVALID },
259 { RTLIB::MUL_F64,
"__muldf3vfp", ISD::SETCC_INVALID },
260 { RTLIB::DIV_F64,
"__divdf3vfp", ISD::SETCC_INVALID },
264 { RTLIB::UNE_F32,
"__nesf2vfp", ISD::SETNE },
265 { RTLIB::OLT_F32,
"__ltsf2vfp", ISD::SETNE },
266 { RTLIB::OLE_F32,
"__lesf2vfp", ISD::SETNE },
267 { RTLIB::OGE_F32,
"__gesf2vfp", ISD::SETNE },
268 { RTLIB::OGT_F32,
"__gtsf2vfp", ISD::SETNE },
269 { RTLIB::UO_F32,
"__unordsf2vfp", ISD::SETNE },
270 { RTLIB::O_F32,
"__unordsf2vfp",
ISD::SETEQ },
273 { RTLIB::OEQ_F64,
"__eqdf2vfp", ISD::SETNE },
274 { RTLIB::UNE_F64,
"__nedf2vfp", ISD::SETNE },
275 { RTLIB::OLT_F64,
"__ltdf2vfp", ISD::SETNE },
276 { RTLIB::OLE_F64,
"__ledf2vfp", ISD::SETNE },
277 { RTLIB::OGE_F64,
"__gedf2vfp", ISD::SETNE },
278 { RTLIB::OGT_F64,
"__gtdf2vfp", ISD::SETNE },
279 { RTLIB::UO_F64,
"__unorddf2vfp", ISD::SETNE },
280 { RTLIB::O_F64,
"__unorddf2vfp", ISD::SETEQ },
285 { RTLIB::FPTOSINT_F64_I32,
"__fixdfsivfp", ISD::SETCC_INVALID },
286 { RTLIB::FPTOUINT_F64_I32,
"__fixunsdfsivfp", ISD::SETCC_INVALID },
287 { RTLIB::FPTOSINT_F32_I32,
"__fixsfsivfp", ISD::SETCC_INVALID },
288 { RTLIB::FPTOUINT_F32_I32,
"__fixunssfsivfp", ISD::SETCC_INVALID },
291 { RTLIB::FPROUND_F64_F32,
"__truncdfsf2vfp", ISD::SETCC_INVALID },
292 { RTLIB::FPEXT_F32_F64,
"__extendsfdf2vfp", ISD::SETCC_INVALID },
299 { RTLIB::SINTTOFP_I32_F64,
"__floatsidfvfp", ISD::SETCC_INVALID },
300 { RTLIB::UINTTOFP_I32_F64,
"__floatunssidfvfp", ISD::SETCC_INVALID },
301 { RTLIB::SINTTOFP_I32_F32,
"__floatsisfvfp", ISD::SETCC_INVALID },
302 { RTLIB::UINTTOFP_I32_F32,
"__floatunssisfvfp", ISD::SETCC_INVALID },
305 for (
const auto &LC : LibraryCalls) {
322 static const struct {
324 const char *
const Name;
411 for (
const auto &LC : LibraryCalls) {
421 static const struct {
423 const char *
const Name;
426 } MemOpsLibraryCalls[] = {
434 for (
const auto &LC : MemOpsLibraryCalls) {
444 static const struct {
446 const char *
const Name;
450 { RTLIB::FPTOSINT_F64_I64,
"__dtoi64", CallingConv::ARM_AAPCS_VFP },
451 { RTLIB::FPTOUINT_F32_I64,
"__stou64", CallingConv::ARM_AAPCS_VFP },
452 { RTLIB::FPTOUINT_F64_I64,
"__dtou64", CallingConv::ARM_AAPCS_VFP },
453 { RTLIB::SINTTOFP_I64_F32,
"__i64tos", CallingConv::ARM_AAPCS_VFP },
454 { RTLIB::SINTTOFP_I64_F64,
"__i64tod", CallingConv::ARM_AAPCS_VFP },
455 { RTLIB::UINTTOFP_I64_F32,
"__u64tos", CallingConv::ARM_AAPCS_VFP },
456 { RTLIB::UINTTOFP_I64_F64,
"__u64tod", CallingConv::ARM_AAPCS_VFP },
459 for (
const auto &LC : LibraryCalls) {
491 static const struct {
493 const char *
const Name;
497 { RTLIB::FPROUND_F64_F16,
"__aeabi_d2h", CallingConv::ARM_AAPCS },
498 { RTLIB::FPEXT_F16_F32,
"__aeabi_h2f", CallingConv::ARM_AAPCS },
501 for (
const auto &LC : LibraryCalls) {
894 HasStandaloneRem =
false;
899 const char *
const Name;
903 { RTLIB::SDIVREM_I16,
"__rt_sdiv", CallingConv::ARM_AAPCS },
904 { RTLIB::SDIVREM_I32,
"__rt_sdiv", CallingConv::ARM_AAPCS },
905 { RTLIB::SDIVREM_I64,
"__rt_sdiv64", CallingConv::ARM_AAPCS },
907 { RTLIB::UDIVREM_I8,
"__rt_udiv", CallingConv::ARM_AAPCS },
908 { RTLIB::UDIVREM_I16,
"__rt_udiv", CallingConv::ARM_AAPCS },
909 { RTLIB::UDIVREM_I32,
"__rt_udiv", CallingConv::ARM_AAPCS },
910 { RTLIB::UDIVREM_I64,
"__rt_udiv64", CallingConv::ARM_AAPCS },
913 for (
const auto &LC : LibraryCalls) {
920 const char *
const Name;
924 { RTLIB::SDIVREM_I16,
"__aeabi_idivmod", CallingConv::ARM_AAPCS },
925 { RTLIB::SDIVREM_I32,
"__aeabi_idivmod", CallingConv::ARM_AAPCS },
926 { RTLIB::SDIVREM_I64,
"__aeabi_ldivmod", CallingConv::ARM_AAPCS },
928 { RTLIB::UDIVREM_I8,
"__aeabi_uidivmod", CallingConv::ARM_AAPCS },
929 { RTLIB::UDIVREM_I16,
"__aeabi_uidivmod", CallingConv::ARM_AAPCS },
930 { RTLIB::UDIVREM_I32,
"__aeabi_uidivmod", CallingConv::ARM_AAPCS },
931 { RTLIB::UDIVREM_I64,
"__aeabi_uldivmod", CallingConv::ARM_AAPCS },
934 for (
const auto &LC : LibraryCalls) {
976 InsertFencesForAtomic =
false;
990 InsertFencesForAtomic =
true;
997 InsertFencesForAtomic =
true;
1017 if (!InsertFencesForAtomic) {
1223 std::pair<const TargetRegisterClass *, uint8_t>
1236 RRC = &ARM::DPRRegClass;
1246 RRC = &ARM::DPRRegClass;
1250 RRC = &ARM::DPRRegClass;
1254 RRC = &ARM::DPRRegClass;
1258 return std::make_pair(RRC, Cost);
1432 return &ARM::QQPRRegClass;
1434 return &ARM::QQQQPRRegClass;
1443 unsigned &PrefAlign)
const {
1444 if (!isa<MemIntrinsic>(CI))
1465 for (
unsigned i = 0; i != NumVals; ++i) {
1497 if (
auto Const = dyn_cast<ConstantSDNode>(Op.
getOperand(1)))
1498 return Const->getZExtValue() == 16;
1505 if (
auto Const = dyn_cast<ConstantSDNode>(Op.
getOperand(1)))
1506 return Const->getZExtValue() == 16;
1513 if (
auto Const = dyn_cast<ConstantSDNode>(Op.
getOperand(1)))
1514 return Const->getZExtValue() == 16;
1549 InvalidOnQNaN =
true;
1555 InvalidOnQNaN =
false;
1566 InvalidOnQNaN =
false;
1573 InvalidOnQNaN =
false;
1584 InvalidOnQNaN =
false;
1593 #include "ARMGenCallingConv.inc" 1600 bool isVarArg)
const {
1636 bool isVarArg)
const {
1637 return CCAssignFnForNode(CC,
false, isVarArg);
1641 bool isVarArg)
const {
1642 return CCAssignFnForNode(CC,
true, isVarArg);
1649 bool isVarArg)
const {
1650 switch (getEffectiveCallingConv(CC, isVarArg)) {
1654 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
1656 return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
1658 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
1660 return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
1662 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
1664 return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
1670 SDValue ARMTargetLowering::LowerCallResult(
1682 for (
unsigned i = 0; i != RVLocs.
size(); ++i) {
1687 if (i == 0 && isThisReturn) {
1689 "unexpected return calling convention register assignment");
1761 Chain, dl, Arg, PtrOff,
1774 unsigned id = Subtarget->
isLittle() ? 0 : 1;
1810 bool isStructRet = (Outs.
empty()) ?
false : Outs[0].Flags.
isSRet();
1811 bool isThisReturn =
false;
1812 bool isSibCall =
false;
1821 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
1823 Outs, OutVals,
Ins, DAG);
1826 "site marked musttail");
1842 unsigned NumBytes = CCInfo.getNextStackOffset();
1861 for (
unsigned i = 0, realArgIdx = 0, e = ArgLocs.
size();
1863 ++i, ++realArgIdx) {
1865 SDValue Arg = OutVals[realArgIdx];
1867 bool isByVal = Flags.
isByVal();
1895 PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
1896 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
1900 PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
1901 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
1905 MemOpChains.
push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
1906 dl, DAG, VA, Flags));
1909 PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
1910 StackPtr, MemOpChains, Flags);
1916 "unexpected calling convention register assignment");
1918 "unexpected use of 'returned'");
1919 isThisReturn =
true;
1922 }
else if (isByVal) {
1924 unsigned offset = 0;
1928 unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
1929 unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();
1931 if (CurByValIdx < ByValArgsCount) {
1933 unsigned RegBegin, RegEnd;
1934 CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);
1939 for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
1946 RegsToPass.
push_back(std::make_pair(j, Load));
1951 offset = RegEnd - RegBegin;
1953 CCInfo.nextInRegsParam();
1969 SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
1973 }
else if (!isSibCall) {
1976 MemOpChains.
push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
1977 dl, DAG, VA, Flags));
1981 if (!MemOpChains.
empty())
1987 for (
unsigned i = 0, e = RegsToPass.
size(); i != e; ++i) {
1989 RegsToPass[i].
second, InFlag);
1996 bool isDirect =
false;
2002 GV =
G->getGlobal();
2006 bool isARMFunc = !Subtarget->
isThumb() || (isStub && !Subtarget->
isMClass());
2007 bool isLocalARMFunc =
false;
2013 "long-calls codegen is not position independent!");
2017 if (isa<GlobalAddressSDNode>(Callee)) {
2030 const char *Sym = S->getSymbol();
2036 ARMPCLabelIndex, 0);
2044 }
else if (isa<GlobalAddressSDNode>(Callee)) {
2048 auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
2050 bool PreferIndirect =
2053 return isa<Instruction>(U) && cast<Instruction>(U)->getParent() == BB;
2056 if (!PreferIndirect) {
2075 "Windows is the only supported COFF target");
2093 const char *Sym = S->getSymbol();
2098 ARMPCLabelIndex, 4);
2114 if ((!isDirect || isARMFunc) && !Subtarget->
hasV5TOps())
2119 if (!isDirect && !Subtarget->
hasV5TOps())
2130 std::vector<SDValue> Ops;
2131 Ops.push_back(Chain);
2132 Ops.push_back(Callee);
2136 for (
unsigned i = 0, e = RegsToPass.
size(); i != e; ++i)
2137 Ops.push_back(DAG.
getRegister(RegsToPass[i].first,
2138 RegsToPass[i].second.getValueType()));
2151 isThisReturn =
false;
2157 assert(Mask &&
"Missing call preserved mask for calling convention");
2162 Ops.push_back(InFlag);
2171 Chain = DAG.
getNode(CallOpc, dl, NodeTys, Ops);
2181 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
2182 InVals, isThisReturn,
2183 isThisReturn ? OutVals[0] :
SDValue());
2190 void ARMTargetLowering::HandleByVal(
CCState *State,
unsigned &
Size,
2191 unsigned Align)
const {
2199 unsigned AlignInRegs = Align / 4;
2200 unsigned Waste = (
ARM::R4 -
Reg) % AlignInRegs;
2201 for (
unsigned i = 0; i < Waste; ++i)
2214 if (NSAAOffset != 0 && Size > Excess) {
2226 unsigned ByValRegBegin =
Reg;
2227 unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4,
ARM::R4);
2231 for (
unsigned i = Reg + 1; i != ByValRegEnd; ++i)
2237 Size = std::max<int>(Size - Excess, 0);
2262 }
else if (
LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
2270 SDValue Ptr = Ld->getBasePtr();
2288 ARMTargetLowering::IsEligibleForTailCallOptimization(
SDValue Callee,
2291 bool isCalleeStructRet,
2292 bool isCallerStructRet,
2307 !isa<GlobalAddressSDNode>(Callee.
getNode()))
2321 if (isCalleeStructRet || isCallerStructRet)
2348 if (CalleeCC != CallerCC) {
2350 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2363 if (!Outs.
empty()) {
2367 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
2375 for (
unsigned i = 0, realArgIdx = 0, e = ArgLocs.
size();
2377 ++i, ++realArgIdx) {
2380 SDValue Arg = OutVals[realArgIdx];
2391 if (!ArgLocs[++i].isRegLoc())
2394 if (!ArgLocs[++i].isRegLoc())
2396 if (!ArgLocs[++i].isRegLoc())
2421 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2443 if (IntKind ==
"" || IntKind ==
"IRQ" || IntKind ==
"FIQ" ||
2446 else if (IntKind ==
"SWI" || IntKind ==
"UNDEF")
2450 "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");
2477 bool isLittleEndian = Subtarget->
isLittle();
2484 for (
unsigned i = 0, realRVLocIdx = 0;
2486 ++i, ++realRVLocIdx) {
2490 SDValue Arg = OutVals[realRVLocIdx];
2491 bool ReturnF16 =
false;
2534 HalfGPRs.
getValue(isLittleEndian ? 0 : 1),
2540 HalfGPRs.
getValue(isLittleEndian ? 1 : 0),
2555 fmrrd.
getValue(isLittleEndian ? 0 : 1),
2561 fmrrd.
getValue(isLittleEndian ? 1 : 0),
2579 else if (ARM::DPRRegClass.
contains(*I))
2607 bool ARMTargetLowering::isUsedByReturnOnly(
SDNode *
N,
SDValue &Chain)
const {
2631 if (Copies.
size() > 2)
2666 bool HasRet =
false;
2682 bool ARMTargetLowering::mayBeEmittedAsTailCall(
const CallInst *CI)
const {
2688 if (!CI->
isTailCall() || Attr.getValueAsString() ==
"true")
2702 &&
"LowerWRITE_REGISTER called for non-i64 type argument.");
2740 Twine(AFI->createPICLabelUId())
2744 return LowerGlobalAddress(GA, DAG);
2764 unsigned ARMPCLabelIndex = 0;
2767 const BlockAddress *BA = cast<BlockAddressSDNode>(
Op)->getBlockAddress();
2770 if (!IsPositionIndependent) {
2773 unsigned PCAdj = Subtarget->
isThumb() ? 4 : 8;
2784 if (!IsPositionIndependent)
2815 ARMTargetLowering::LowerGlobalTLSAddressDarwin(
SDValue Op,
2818 "This function expects a Darwin target");
2823 SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG);
2860 ARMTargetLowering::LowerGlobalTLSAddressWindows(
SDValue Op,
2904 const auto *GA = cast<GlobalAddressSDNode>(
Op);
2920 unsigned char PCAdj = Subtarget->
isThumb() ? 4 : 8;
2940 Entry.
Node = Argument;
2942 Args.push_back(Entry);
2950 std::pair<SDValue, SDValue> CallResult =
LowerCallTo(CLI);
2951 return CallResult.first;
2973 unsigned char PCAdj = Subtarget->
isThumb() ? 4 : 8;
2981 PtrVT, dl, Chain, Offset,
2989 PtrVT, dl, Chain, Offset,
2999 PtrVT, dl, Chain, Offset,
3015 return LowerGlobalTLSAddressDarwin(Op, DAG);
3018 return LowerGlobalTLSAddressWindows(Op, DAG);
3027 return LowerToTLSGeneralDynamicModel(GA, DAG);
3030 return LowerToTLSExecModels(GA, DAG, model);
3039 for (
auto *U : V->
users())
3041 while (!Worklist.
empty()) {
3043 if (isa<ConstantExpr>(U)) {
3044 for (
auto *UU : U->users())
3050 if (!
I ||
I->getParent()->getParent() !=
F)
3079 if (!GVar || !GVar->hasInitializer() ||
3080 !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() ||
3081 !GVar->hasLocalLinkage())
3086 auto *
Init = GVar->getInitializer();
3088 Init->needsRelocation())
3100 unsigned RequiredPadding = 4 - (Size % 4);
3101 bool PaddingPossible =
3102 RequiredPadding == 4 || (CDAInit && CDAInit->isString());
3107 unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding);
3131 if (RequiredPadding != 4) {
3136 while (RequiredPadding--)
3149 ++NumConstpoolPromoted;
3154 if (
const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
3155 if (!(GV = GA->getBaseObject()))
3157 if (
const auto *V = dyn_cast<GlobalVariable>(GV))
3158 return V->isConstant();
3159 return isa<Function>(GV);
3167 return LowerGlobalAddressWindows(Op, DAG);
3169 return LowerGlobalAddressELF(Op, DAG);
3171 return LowerGlobalAddressDarwin(Op, DAG);
3179 const GlobalValue *GV = cast<GlobalAddressSDNode>(
Op)->getGlobal();
3198 }
else if (Subtarget->
isROPI() && IsRO) {
3203 }
else if (Subtarget->
isRWPI() && !IsRO) {
3244 "ROPI/RWPI not currently supported for Darwin");
3247 const GlobalValue *GV = cast<GlobalAddressSDNode>(
Op)->getGlobal();
3270 "Windows on ARM expects to use movw/movt");
3272 "ROPI/RWPI not currently supported for Windows");
3275 const GlobalValue *GV = cast<GlobalAddressSDNode>(
Op)->getGlobal();
3279 else if (!TM.shouldAssumeDSOLocal(*GV->
getParent(), GV))
3314 SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(
SDValue Op,
3324 unsigned IntNo = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
3339 unsigned PCAdj = IsPositionIndependent ? (Subtarget->
isThumb() ? 4 : 8) : 0;
3349 if (IsPositionIndependent) {
3417 "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
3450 unsigned isRead = ~cast<ConstantSDNode>(Op.
getOperand(2))->getZExtValue() & 1;
3456 unsigned isData = cast<ConstantSDNode>(Op.
getOperand(4))->getZExtValue();
3459 isRead = ~isRead & 1;
3460 isData = ~isData & 1;
3486 const SDLoc &dl)
const {
3492 RC = &ARM::tGPRRegClass;
3494 RC = &ARM::GPRRegClass;
3529 const Value *OrigArg,
3530 unsigned InRegsParamRecordIdx,
3531 int ArgOffset,
unsigned ArgSize)
const {
3546 unsigned RBegin, REnd;
3556 ArgOffset = -4 * (
ARM::R4 - RBegin);
3566 for (
unsigned Reg = RBegin, i = 0;
Reg < REnd; ++
Reg, ++i) {
3575 if (!MemOps.
empty())
3584 unsigned TotalArgRegsSaveSize,
3585 bool ForceMutable)
const {
3594 int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain,
nullptr,
3600 SDValue ARMTargetLowering::LowerFormalArguments(
3618 unsigned CurArgIdx = 0;
3630 unsigned ArgRegBegin =
ARM::R4;
3631 for (
unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3642 unsigned RBegin, REnd;
3644 ArgRegBegin = std::min(ArgRegBegin, RBegin);
3650 int lastInsIndex = -1;
3654 ArgRegBegin = std::min(ArgRegBegin, (
unsigned)GPRArgRegs[RegIdx]);
3657 unsigned TotalArgRegsSaveSize = 4 * (
ARM::R4 - ArgRegBegin);
3661 for (
unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3663 if (Ins[VA.
getValNo()].isOrigArg()) {
3664 std::advance(CurOrigArg,
3665 Ins[VA.
getValNo()].getOrigArgIndex() - CurArgIdx);
3666 CurArgIdx = Ins[VA.
getValNo()].getOrigArgIndex();
3676 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
3687 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
3692 ArgValue, ArgValue1,
3695 ArgValue, ArgValue2,
3698 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
3704 RC = &ARM::HPRRegClass;
3706 RC = &ARM::SPRRegClass;
3708 RC = &ARM::DPRRegClass;
3710 RC = &ARM::QPRRegClass;
3713 : &ARM::GPRRegClass;
3753 if (index != lastInsIndex)
3762 assert(Ins[index].isOrigArg() &&
3763 "Byval arguments cannot be implicit");
3767 CCInfo, DAG, dl, Chain, &*CurOrigArg, CurByValIndex,
3782 lastInsIndex = index;
3789 VarArgStyleRegisters(CCInfo, DAG, dl, Chain,
3791 TotalArgRegsSaveSize);
3801 return CFP->getValueAPF().isPosZero();
3807 if (
const ConstantFP *CFP = dyn_cast<ConstantFP>(
CP->getConstVal()))
3808 return CFP->getValueAPF().isPosZero();
3826 const SDLoc &dl)
const {
3828 unsigned C = RHSC->getZExtValue();
3890 bool InvalidOnQNaN)
const {
3928 std::pair<SDValue, SDValue>
3995 return std::make_pair(Value, OverflowCmp);
4006 std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc);
4015 ARMcc, CCR, OverflowCmp);
4023 SDLoc DL(BoolCarry);
4097 std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
4101 return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR,
4116 if (CMOVTrue && CMOVFalse) {
4118 unsigned CMOVFalseVal = CMOVFalse->getZExtValue();
4122 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
4124 False = SelectFalse;
4125 }
else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
4136 return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG);
4152 bool &swpCmpOps,
bool &swpVselOps) {
4180 swpCmpOps = !swpCmpOps;
4181 swpVselOps = !swpVselOps;
4217 ARMcc, CCR, duplicateCmp(Cmp, DAG));
4244 ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) ||
4246 ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal)));
4254 ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))) ||
4256 ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal)));
4277 uint64_t &K,
bool &usat) {
4284 const SDValue Op2 = isa<ConstantSDNode>(TrueVal1) ? FalseVal1 : TrueVal1;
4296 SDValue *K1 = isa<ConstantSDNode>(LHS1) ? &LHS1 : isa<ConstantSDNode>(RHS1)
4299 SDValue *K2 = isa<ConstantSDNode>(LHS2) ? &LHS2 : isa<ConstantSDNode>(RHS2)
4302 SDValue K2Tmp = isa<ConstantSDNode>(TrueVal2) ? TrueVal2 : FalseVal2;
4303 SDValue V1Tmp = (K1 && *K1 == LHS1) ? RHS1 : LHS1;
4304 SDValue V2Tmp = (K2 && *K2 == LHS2) ? RHS2 : LHS2;
4305 SDValue V2 = (K2Tmp == TrueVal2) ? FalseVal2 : TrueVal2;
4317 if (!K1 || !K2 || *K1 == Op2 || *K2 != K2Tmp || V1Tmp != V2Tmp ||
4335 if (!UpperCheckOp || !LowerCheckOp || LowerCheckOp == UpperCheckOp)
4341 int64_t Val1 = cast<ConstantSDNode>(*K1)->getSExtValue();
4342 int64_t Val2 = cast<ConstantSDNode>(*K2)->getSExtValue();
4343 int64_t PosVal =
std::max(Val1, Val2);
4344 int64_t NegVal = std::min(Val1, Val2);
4346 if (((Val1 > Val2 && UpperCheckOp == &Op) ||
4347 (Val1 < Val2 && UpperCheckOp == &Op2)) &&
4353 else if (NegVal == 0)
4359 K = (uint64_t)PosVal;
4384 SDValue *K = isa<ConstantSDNode>(LHS) ? &LHS : isa<ConstantSDNode>(RHS)
4392 SDValue KTmp = isa<ConstantSDNode>(TrueVal) ? TrueVal : FalseVal;
4393 V = (KTmp == TrueVal) ? FalseVal : TrueVal;
4394 SDValue VTmp = (K && *K == LHS) ? RHS : LHS;
4398 if (*K != KTmp || V != VTmp)
4415 uint64_t SatConstant;
4487 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
4488 return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
4493 FPCCToARMCC(CC, CondCode, CondCode2, InvalidOnQNaN);
4501 bool swpCmpOps =
false;
4502 bool swpVselOps =
false;
4515 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN);
4517 SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
4521 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN);
4522 Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG);
4554 if (
LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
4556 Ld->getPointerInfo(), Ld->getAlignment(),
4557 Ld->getMemOperand()->getFlags());
4572 if (
LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
4573 SDValue Ptr = Ld->getBasePtr();
4576 Ld->getAlignment(), Ld->getMemOperand()->
getFlags());
4579 unsigned NewAlign =
MinAlign(Ld->getAlignment(), 4);
4583 Ld->getPointerInfo().getWithOffset(4), NewAlign,
4584 Ld->getMemOperand()->getFlags());
4602 bool LHSSeenZero =
false;
4604 bool RHSSeenZero =
false;
4606 if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) {
4622 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
4625 Chain, Dest, ARMcc, CCR, Cmp);
4637 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
4665 std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
4717 std::tie(Value, OverflowCmp) = getARMXALUOOp(LHS.
getValue(0), DAG, ARMcc);
4734 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
4737 Chain, Dest, ARMcc, CCR, Cmp);
4743 if (
SDValue Result = OptimizeVFPBrcond(Op, DAG))
4749 FPCCToARMCC(CC, CondCode, CondCode2, InvalidOnQNaN);
4752 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN);
4755 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
4794 DAG.
getLoad(PTy, dl, Chain, Addr,
4811 const bool HasFullFP16 =
4863 "Invalid type for custom lowering!");
4865 const bool HasFullFP16 =
4893 return DAG.
getNode(Opc, dl, VT, Op);
4924 bool UseNEON = !InGPR && Subtarget->
hasNEON();
5007 unsigned Depth = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
5009 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
5030 unsigned Depth = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
5041 unsigned ARMTargetLowering::getRegisterByName(
const char* RegName,
EVT VT,
5044 .Case(
"sp", ARM::SP)
5060 &&
"ExpandREAD_REGISTER called for non-i64 type result.");
5105 NewIndex *= APIntIndex;
5107 if (NewIndex.getBitWidth() > 32)
5137 const bool HasFullFP16 = Subtarget->
hasFullFP16();
5194 ZeroExtend->getValueType(0) !=
MVT::i32)
5197 auto Copy = ZeroExtend->use_begin();
5282 SDValue LoBigShift = DAG.
getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
5290 ? DAG.getNode(Opc, dl, VT, ShOpHi,
5291 DAG.getConstant(VTBits - 1, dl, VT))
5292 : DAG.getConstant(0, dl, VT);
5293 SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl,
MVT::i32),
5299 return DAG.getMergeValues(Ops, dl);
5331 SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl,
MVT::i32),
5335 DAG.getConstant(0, dl, VT), ARMcc, CCR, CmpLo);
5338 return DAG.getMergeValues(Ops, dl);
5422 assert(ST->
hasNEON() &&
"Custom ctpop lowering requires NEON.");
5425 "Unexpected type for custom ctpop lowering");
5433 unsigned EltSize = 8;
5496 "Unknown shift to lower!");
5525 bool Invert =
false;
5534 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->
get();
5551 Merged = DAG.
getNOT(dl, Merged, CmpVT);
5561 switch (SetCCOpcode) {
5602 switch (SetCCOpcode) {
5667 Result = DAG.
getNode(Opc, dl, CmpVT, Op0, Op1);
5670 Result = DAG.
getNode(Opc, dl, CmpVT, Op0, Op1);
5676 Result = DAG.
getNOT(dl, Result, VT);
5716 const SDLoc &dl,
EVT &VT,
bool is128Bits,
5718 unsigned OpCmode, Imm;
5728 switch (SplatBitSize) {
5733 assert((SplatBits & ~0xff) == 0 &&
"one byte splat value is too big");
5742 if ((SplatBits & ~0xff) == 0) {
5748 if ((SplatBits & ~0xff00) == 0) {
5751 Imm = SplatBits >> 8;
5762 if ((SplatBits & ~0xff) == 0) {
5768 if ((SplatBits & ~0xff00) == 0) {
5771 Imm = SplatBits >> 8;
5774 if ((SplatBits & ~0xff0000) == 0) {
5777 Imm = SplatBits >> 16;
5780 if ((SplatBits & ~0xff000000) == 0) {
5783 Imm = SplatBits >> 24;
5790 if ((SplatBits & ~0xffff) == 0 &&
5791 ((SplatBits | SplatUndef) & 0xff) == 0xff) {
5794 Imm = SplatBits >> 8;
5798 if ((SplatBits & ~0xffffff) == 0 &&
5799 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
5802 Imm = SplatBits >> 16;
5817 uint64_t BitMask = 0xff;
5821 for (
int ByteNum = 0; ByteNum < 8; ++ByteNum) {
5822 if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
5825 }
else if ((SplatBits & BitMask) != 0) {
5834 Imm = ((Imm & 0xf) << 4) | ((Imm & 0xf0) >> 4);
5864 APInt INTVal = FPVal.bitcastToAPInt();
5917 uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue();
5922 if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32))
5976 unsigned ExpectedElt = Imm;
5977 for (
unsigned i = 1; i < NumElts; ++i) {
5981 if (ExpectedElt == NumElts)
5984 if (M[i] < 0)
continue;
5985 if (ExpectedElt != static_cast<unsigned>(M[i]))
5993 bool &ReverseVEXT,
unsigned &Imm) {
5995 ReverseVEXT =
false;
6006 unsigned ExpectedElt = Imm;
6007 for (
unsigned i = 1; i < NumElts; ++i) {
6011 if (ExpectedElt == NumElts * 2) {
6016 if (M[i] < 0)
continue;
6017 if (ExpectedElt != static_cast<unsigned>(M[i]))
6032 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
6033 "Only possible block sizes for VREV are: 16, 32, 64");
6040 unsigned BlockElts = M[0] + 1;
6043 BlockElts = BlockSize / EltSz;
6045 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
6048 for (
unsigned i = 0; i < NumElts; ++i) {
6049 if (M[i] < 0)
continue;
6050 if ((
unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
6066 if (Mask.
size() == Elements * 2)
6067 return Index / Elements;
6068 return Mask[
Index] == 0 ? 0 : 1;
6098 if (M.
size() != NumElts && M.
size() != NumElts*2)
6106 for (
unsigned i = 0; i < M.
size(); i += NumElts) {
6108 for (
unsigned j = 0; j < NumElts; j += 2) {
6109 if ((M[i+j] >= 0 && (
unsigned) M[i+j] != j + WhichResult) ||
6110 (M[i+j+1] >= 0 && (
unsigned) M[i+j+1] != j + NumElts + WhichResult))
6115 if (M.
size() == NumElts*2)
6130 if (M.
size() != NumElts && M.
size() != NumElts*2)
6133 for (
unsigned i = 0; i < M.
size(); i += NumElts) {
6135 for (
unsigned j = 0; j < NumElts; j += 2) {
6136 if ((M[i+j] >= 0 && (
unsigned) M[i+j] != j + WhichResult) ||
6137 (M[i+j+1] >= 0 && (
unsigned) M[i+j+1] != j + WhichResult))
6142 if (M.
size() == NumElts*2)
6162 if (M.
size() != NumElts && M.
size() != NumElts*2)
6165 for (
unsigned i = 0; i < M.
size(); i += NumElts) {
6167 for (
unsigned j = 0; j < NumElts; ++j) {
6168 if (M[i+j] >= 0 && (
unsigned) M[i+j] != 2 * j + WhichResult)
6173 if (M.
size() == NumElts*2)
6192 if (M.
size() != NumElts && M.
size() != NumElts*2)
6195 unsigned Half = NumElts / 2;
6196 for (
unsigned i = 0; i < M.
size(); i += NumElts) {
6198 for (
unsigned j = 0; j < NumElts; j += Half) {
6199 unsigned Idx = WhichResult;
6200 for (
unsigned k = 0; k < Half; ++k) {
6201 int MIdx = M[i + j + k];
6202 if (MIdx >= 0 && (
unsigned) MIdx != Idx)
6209 if (M.
size() == NumElts*2)
6233 if (M.
size() != NumElts && M.
size() != NumElts*2)
6236 for (
unsigned i = 0; i < M.
size(); i += NumElts) {
6238 unsigned Idx = WhichResult * NumElts / 2;
6239 for (
unsigned j = 0; j < NumElts; j += 2) {
6240 if ((M[i+j] >= 0 && (
unsigned) M[i+j] != Idx) ||
6241 (M[i+j+1] >= 0 && (
unsigned) M[i+j+1] != Idx + NumElts))
6247 if (M.
size() == NumElts*2)
6266 if (M.
size() != NumElts && M.
size() != NumElts*2)
6269 for (
unsigned i = 0; i < M.
size(); i += NumElts) {
6271 unsigned Idx = WhichResult * NumElts / 2;
6272 for (
unsigned j = 0; j < NumElts; j += 2) {
6273 if ((M[i+j] >= 0 && (
unsigned) M[i+j] != Idx) ||
6274 (M[i+j+1] >= 0 && (
unsigned) M[i+j+1] != Idx))
6280 if (M.
size() == NumElts*2)
6293 unsigned &WhichResult,
6296 if (
isVTRNMask(ShuffleMask, VT, WhichResult))
6298 if (
isVUZPMask(ShuffleMask, VT, WhichResult))
6300 if (
isVZIPMask(ShuffleMask, VT, WhichResult))
6318 if (NumElts != M.
size())
6322 for (
unsigned i = 0; i != NumElts; ++i)
6323 if (M[i] >= 0 && M[i] != (
int) (NumElts - 1 - i))
6335 if (!isa<ConstantSDNode>(N))
6337 Val = cast<ConstantSDNode>(
N)->getZExtValue();
6340 if (Val <= 255 || ~Val <= 255)
6357 APInt SplatBits, SplatUndef;
6358 unsigned SplatBitSize;
6360 if (BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
6364 if (SplatBitSize <= 64) {
6377 uint64_t NegatedImm = (~SplatBits).getZExtValue();
6406 bool isOnlyLowElement =
true;
6407 bool usesOnlyOneValue =
true;
6408 bool hasDominantValue =
false;
6415 for (
unsigned i = 0; i < NumElts; ++i) {
6420 isOnlyLowElement =
false;
6421 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
6424 ValueCounts.
insert(std::make_pair(V, 0));
6425 unsigned &Count = ValueCounts[V];
6428 if (++Count > (NumElts / 2)) {
6429 hasDominantValue =
true;
6433 if (ValueCounts.
size() != 1)
6434 usesOnlyOneValue =
false;
6436 Value = ValueCounts.
begin()->first;
6438 if (ValueCounts.
empty())
6450 if (hasDominantValue && EltSize <= 32) {
6479 if (!usesOnlyOneValue) {
6482 for (
unsigned I = 0;
I < NumElts; ++
I) {
6496 for (
unsigned i = 0; i < NumElts; ++i)
6501 Val = LowerBUILD_VECTOR(Val, DAG, ST);
6505 if (usesOnlyOneValue) {
6507 if (isConstant && Val.
getNode())
6520 SDValue shuffle = ReconstructShuffle(Op, DAG);
6534 Lower = LowerBUILD_VECTOR(Lower, DAG, ST);
6536 HVT, dl,
makeArrayRef(&Ops[NumElts / 2], NumElts / 2));
6538 Upper = LowerBUILD_VECTOR(Upper, DAG, ST);
6546 if (EltSize >= 32) {
6552 for (
unsigned i = 0; i < NumElts; ++i)
6564 if (!isConstant && !usesOnlyOneValue) {
6566 for (
unsigned i = 0 ; i < NumElts; ++i) {
6588 struct ShuffleSourceInfo {
6591 unsigned MaxElt = 0;
6601 int WindowScale = 1;
6603 ShuffleSourceInfo(
SDValue Vec) : Vec(Vec), ShuffleVec(Vec) {}
6611 for (
unsigned i = 0; i < NumElts; ++i) {
6619 }
else if (!isa<ConstantSDNode>(V.
getOperand(1))) {
6632 unsigned EltNo = cast<ConstantSDNode>(V.
getOperand(1))->getZExtValue();
6639 if (Sources.
size() > 2)
6645 for (
auto &
Source : Sources) {
6646 EVT SrcEltTy =
Source.Vec.getValueType().getVectorElementType();
6647 if (SrcEltTy.
bitsLT(SmallestEltTy))
6648 SmallestEltTy = SrcEltTy;
6650 unsigned ResMultiplier =
6658 for (
auto &Src : Sources) {
6659 EVT SrcVT = Src.ShuffleVec.getValueType();
6677 DAG.
getUNDEF(Src.ShuffleVec.getValueType()));
6684 if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
6689 if (Src.MinElt >= NumSrcElts) {
6694 Src.WindowBase = -NumSrcElts;
6695 }
else if (Src.MaxElt < NumSrcElts) {
6712 Src.WindowBase = -Src.MinElt;
6719 for (
auto &Src : Sources) {
6721 if (SrcEltTy == SmallestEltTy)
6726 Src.WindowBase *= Src.WindowScale;
6732 assert(Src.ShuffleVec.getValueType() == ShuffleVT););
6743 int EltNo = cast<ConstantSDNode>(Entry.
getOperand(1))->getSExtValue();
6751 int LanesDefined = BitsDefined / BitsPerShuffleLane;
6755 int *LaneMask = &
Mask[i * ResMultiplier];
6757 int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
6758 ExtractBase += NumElts * (Src - Sources.begin());
6759 for (
int j = 0; j < LanesDefined; ++j)
6760 LaneMask[j] = ExtractBase + j;
6769 assert(Sources.size() <= 2 &&
"Too many sources!");
6772 for (
unsigned i = 0; i < Sources.size(); ++i)
6773 ShuffleOps[i] = Sources[i].ShuffleVec;
6776 ShuffleOps[1],
Mask);
6787 unsigned PFIndexes[4];
6788 for (
unsigned i = 0; i != 4; ++i) {
6792 PFIndexes[i] = M[i];
6796 unsigned PFTableIndex =
6797 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
6799 unsigned Cost = (PFEntry >> 30);
6805 bool ReverseVEXT, isV_UNDEF;
6806 unsigned Imm, WhichResult;
6809 return (EltSize >= 32 ||
6825 unsigned OpNum = (PFEntry >> 26) & 0x0F;
6826 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
6827 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
6847 if (OpNum == OP_COPY) {
6848 if (LHSID == (1*9+2)*9+3)
return LHS;
6849 assert(LHSID == ((4*9+5)*9+6)*9+7 &&
"Illegal OP_COPY!");
6886 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
6890 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
6894 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
6908 I = ShuffleMask.
begin(),
E = ShuffleMask.
end();
I !=
E; ++
I)
6926 "Expect an v8i16/v16i8 type");
6931 unsigned ExtractNum = (VT ==
MVT::v16i8) ? 8 : 4;
6952 if (EltSize <= 32) {
6956 if (Lane == -1) Lane = 0;
6967 bool IsScalarToVector =
true;
6970 IsScalarToVector =
false;
6973 if (IsScalarToVector)
6982 if (
isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
7006 unsigned WhichResult;
7009 ShuffleMask, VT, WhichResult, isV_UNDEF)) {
7013 .getValue(WhichResult);
7039 }) &&
"Unexpected shuffle index into UNDEF operand!");
7042 ShuffleMask, SubVT, WhichResult, isV_UNDEF)) {
7045 assert((WhichResult == 0) &&
7046 "In-place shuffle of concat can only have one result!");
7059 unsigned PFIndexes[4];
7060 for (
unsigned i = 0; i != 4; ++i) {
7061 if (ShuffleMask[i] < 0)
7064 PFIndexes[i] = ShuffleMask[i];
7068 unsigned PFTableIndex =
7069 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
7071 unsigned Cost = (PFEntry >> 30);
7078 if (EltSize >= 32) {
7086 for (
unsigned i = 0; i < NumElts; ++i) {
7087 if (ShuffleMask[i] < 0)
7091 ShuffleMask[i] < (
int)NumElts ? V1 : V2,
7112 if (!isa<ConstantSDNode>(Lane))
7121 if (!isa<ConstantSDNode>(Lane))
7137 "unexpected CONCAT_VECTORS");
7166 unsigned HiElt = 1 - LoElt;
7171 if (!Lo0 || !Hi0 || !Lo1 || !Hi1)
7174 if (Hi0->getSExtValue() == Lo0->
getSExtValue() >> 32 &&
7175 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32)
7178 if (Hi0->isNullValue() && Hi1->isNullValue())
7191 unsigned HalfSize = EltSize / 2;
7193 if (!
isIntN(HalfSize,
C->getSExtValue()))
7196 if (!
isUIntN(HalfSize,
C->getZExtValue()))
7234 switch (OrigSimpleTy) {
7250 unsigned ExtOpcode) {
7302 "Expected extending load");
7308 DAG.
getNode(Opcode,
SDLoc(newLoad),
LD->getValueType(0), newLoad);
7329 unsigned NumElts = VT.getVectorNumElements();
7333 for (
unsigned i = 0; i != NumElts; ++i) {
7370 "unexpected type for custom-lowering ISD::MUL");
7373 unsigned NewOpc = 0;
7377 if (isN0SExt && isN1SExt)
7382 if (isN0ZExt && isN1ZExt)
7384 else if (isN1SExt || isN1ZExt) {
7418 "unexpected types for extended operands to VMULL");
7419 return DAG.
getNode(NewOpc, DL, VT, Op0, Op1);
7433 return DAG.
getNode(N0->getOpcode(), DL, VT,
7513 "unexpected type for custom-lowering ISD::SDIV");
7549 "unexpected type for custom-lowering ISD::UDIV");
7684 bool ShouldUseSRet = Subtarget->
isAPCS_ABI();
7686 if (ShouldUseSRet) {
7688 const uint64_t ByteSize = DL.getTypeAllocSize(RetTy);
7689 const unsigned StackAlign = DL.getPrefTypeAlignment(RetTy);
7690 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign,
false);
7696 Entry.IsSExt =
false;
7697 Entry.IsZExt =
false;
7698 Entry.IsSRet =
true;
7699 Args.push_back(Entry);
7706 Entry.IsSExt =
false;
7707 Entry.IsZExt =
false;
7708 Args.push_back(Entry);
7711 (ArgVT ==
MVT::f64) ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
7719 .setCallee(CC, RetTy, Callee, std::move(Args))
7720 .setDiscardResult(ShouldUseSRet);
7721 std::pair<SDValue, SDValue> CallResult =
LowerCallTo(CLI);
7724 return CallResult.first;
7745 "unexpected type for custom lowering DIV");
7751 const char *
Name =
nullptr;
7753 Name = (VT ==
MVT::i32) ?
"__rt_sdiv" :
"__rt_sdiv64";
7755 Name = (VT ==
MVT::i32) ?
"__rt_udiv" :
"__rt_udiv64";
7761 for (
auto AI : {1, 0}) {
7765 Args.push_back(Arg);
7768 CallLoweringInfo CLI(DAG);
7772 ES, std::move(Args));
7782 ARMTargetLowering::BuildSDIVPow2(
SDNode *N,
const APInt &Divisor,
7802 if (!(MinSize && HasDivide))
7815 if (Divisor.
sgt(128))
7822 bool Signed)
const {
7824 "unexpected type for custom lowering DIV");
7830 return LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
7846 void ARMTargetLowering::ExpandDIV_Windows(
7853 "unexpected type for custom lowering DIV");
7858 SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
7915 const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 };
7924 "AtomicCmpSwap on types less than 64 should be legal");
7930 ARM::CMP_SWAP_64,
SDLoc(N),
7952 "Custom lowering is MSVCRT specific!");
7962 TargetLowering::ArgListEntry Entry;
7966 Entry.IsZExt =
true;
7967 Args.push_back(Entry);
7971 Entry.IsZExt =
true;
7972 Args.push_back(Entry);
7983 bool IsTC = TLI.isInTailCallPosition(DAG, Op.
getNode(), TCChain) &&
7993 std::pair<SDValue, SDValue> CI = TLI.LowerCallTo(CLI);
7996 return !CI.second.getNode() ? DAG.
getRoot() : CI.first;
8052 return LowerDIV_Windows(Op, DAG,
true);
8056 return LowerDIV_Windows(Op, DAG,
false);
8062 return LowerSignedALUO(Op, DAG);
8065 return LowerUnsignedALUO(Op, DAG);
8073 return LowerDYNAMIC_STACKALLOC(Op, DAG);
8084 unsigned IntNo = cast<ConstantSDNode>(N->
getOperand(0))->getZExtValue();
8134 Res = LowerREM(N, DAG);
8138 Res = LowerDivRem(
SDValue(N, 0), DAG);
8172 "ROPI/RWPI not currently supported with SjLj");
8182 bool isThumb2 = Subtarget->
isThumb2();
8185 unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8;
8191 : &ARM::GPRRegClass;
8210 BuildMI(*MBB, MI, dl, TII->
get(ARM::t2LDRpci), NewVReg1)
8211 .addConstantPoolIndex(CPI)
8216 BuildMI(*MBB, MI, dl, TII->
get(ARM::t2ORRri), NewVReg2)
8222 BuildMI(*MBB, MI, dl, TII->
get(ARM::tPICADD), NewVReg3)
8225 BuildMI(*MBB, MI, dl, TII->
get(ARM::t2STRi12))
8231 }
else if (isThumb) {
8240 BuildMI(*MBB, MI, dl, TII->
get(ARM::tLDRpci), NewVReg1)
8241 .addConstantPoolIndex(CPI)
8245 BuildMI(*MBB, MI, dl, TII->
get(ARM::tPICADD), NewVReg2)
8250 BuildMI(*MBB, MI, dl, TII->
get(ARM::tMOVi8), NewVReg3)
8255 BuildMI(*MBB, MI, dl, TII->
get(ARM::tORR), NewVReg4)
8261 BuildMI(*MBB, MI, dl, TII->
get(ARM::tADDframe), NewVReg5)
8276 BuildMI(*MBB, MI, dl, TII->
get(ARM::LDRi12), NewVReg1)
8277 .addConstantPoolIndex(CPI)
8282 BuildMI(*MBB, MI, dl, TII->
get(ARM::PICADD), NewVReg2)
8295 void ARMTargetLowering::EmitSjLjDispatchBlock(
MachineInstr &MI,
8305 : &ARM::GPRnopcRegClass;
8310 unsigned MaxCSNum = 0;
8313 if (!BB->isEHPad())
continue;
8318 II = BB->begin(),
IE = BB->end(); II !=
IE; ++II) {
8319 if (!II->isEHLabel())
continue;
8321 MCSymbol *Sym = II->getOperand(0).getMCSymbol();
8326 CSI = CallSiteIdxs.
begin(),
CSE = CallSiteIdxs.
end();
8327 CSI !=
CSE; ++CSI) {
8328 CallSiteNumToLPad[*CSI].push_back(&*BB);
8329 MaxCSNum =
std::max(MaxCSNum, *CSI);
8336 std::vector<MachineBasicBlock*> LPadList;
8338 LPadList.reserve(CallSiteNumToLPad.
size());
8339 for (
unsigned I = 1;
I <= MaxCSNum; ++
I) {
8342 II = MBBList.
begin(),
IE = MBBList.
end(); II !=
IE; ++II) {
8343 LPadList.push_back(*II);
8344 InvokeBBs.
insert((*II)->pred_begin(), (*II)->pred_end());
8348 assert(!LPadList.empty() &&
8349 "No landing pad destinations for the dispatch jump table!");
8363 unsigned trap_opcode;
8365 trap_opcode = ARM::tTRAP;
8382 SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI);
8389 MIB =
BuildMI(DispatchBB, dl, TII->
get(ARM::Int_eh_sjlj_dispatchsetup));
8401 unsigned NumLPads = LPadList.size();
8404 BuildMI(DispatchBB, dl, TII->
get(ARM::t2LDRi12), NewVReg1)
8410 if (NumLPads < 256) {
8411 BuildMI(DispatchBB, dl, TII->
get(ARM::t2CMPri))
8417 BuildMI(DispatchBB, dl, TII->
get(ARM::t2MOVi16), VReg1)
8418 .addImm(NumLPads & 0xFFFF)
8421 unsigned VReg2 = VReg1;
8422 if ((NumLPads & 0xFFFF0000) != 0) {
8424 BuildMI(DispatchBB, dl, TII->
get(ARM::t2MOVTi16), VReg2)
8430 BuildMI(DispatchBB, dl, TII->
get(ARM::t2CMPrr))
8436 BuildMI(DispatchBB, dl, TII->
get(ARM::t2Bcc))
8442 BuildMI(DispContBB, dl, TII->
get(ARM::t2LEApcrelJT), NewVReg3)
8443 .addJumpTableIndex(MJTI)
8447 BuildMI(DispContBB, dl, TII->
get(ARM::t2ADDrs), NewVReg4)
8454 BuildMI(DispContBB, dl, TII->
get(ARM::t2BR_JT))
8458 }
else if (Subtarget->
isThumb()) {
8460 BuildMI(DispatchBB, dl, TII->
get(ARM::tLDRspi), NewVReg1)
8466 if (NumLPads < 256) {
8467 BuildMI(DispatchBB, dl, TII->
get(ARM::tCMPi8))
8483 BuildMI(DispatchBB, dl, TII->
get(ARM::tLDRpci))
8487 BuildMI(DispatchBB, dl, TII->
get(ARM::tCMPr))
8499 BuildMI(DispContBB, dl, TII->
get(ARM::tLSLri), NewVReg2)
8506 BuildMI(DispContBB, dl, TII->
get(ARM::tLEApcrelJT), NewVReg3)
8507 .addJumpTableIndex(MJTI)
8511 BuildMI(DispContBB, dl, TII->
get(ARM::tADDrr), NewVReg4)
8521 BuildMI(DispContBB, dl, TII->
get(ARM::tLDRi), NewVReg5)
8527 unsigned NewVReg6 = NewVReg5;
8528 if (IsPositionIndependent) {
8530 BuildMI(DispContBB, dl, TII->
get(ARM::tADDrr), NewVReg6)
8537 BuildMI(DispContBB, dl, TII->
get(ARM::tBR_JTr))
8542 BuildMI(DispatchBB, dl, TII->
get(ARM::LDRi12), NewVReg1)
8548 if (NumLPads < 256) {
8549 BuildMI(DispatchBB, dl, TII->
get(ARM::CMPri))
8555 BuildMI(DispatchBB, dl, TII->
get(ARM::MOVi16), VReg1)
8556 .addImm(NumLPads & 0xFFFF)
8559 unsigned VReg2 = VReg1;
8560 if ((NumLPads & 0xFFFF0000) != 0) {
8562 BuildMI(DispatchBB, dl, TII->
get(ARM::MOVTi16), VReg2)
8568 BuildMI(DispatchBB, dl, TII->
get(ARM::CMPrr))
8584 BuildMI(DispatchBB, dl, TII->
get(ARM::LDRcp))
8589 BuildMI(DispatchBB, dl, TII->
get(ARM::CMPrr))
8601 BuildMI(DispContBB, dl, TII->
get(ARM::MOVsi), NewVReg3)
8607 BuildMI(DispContBB, dl, TII->
get(ARM::LEApcrelJT), NewVReg4)
8608 .addJumpTableIndex(MJTI)
8614 BuildMI(DispContBB, dl, TII->
get(ARM::LDRrs), NewVReg5)
8621 if (IsPositionIndependent) {
8622 BuildMI(DispContBB, dl, TII->
get(ARM::BR_JTadd))
8627 BuildMI(DispContBB, dl, TII->
get(ARM::BR_JTr))
8635 for (std::vector<MachineBasicBlock*>::iterator
8636 I = LPadList.begin(),
E = LPadList.end();
I !=
E; ++
I) {
8638 if (SeenMBBs.
insert(CurMBB).second)
8651 while (!Successors.empty()) {
8660 BB->normalizeSuccProbs();
8667 II = BB->rbegin(),
IE = BB->rend(); II !=
IE; ++II) {
8668 if (!II->isCall())
continue;
8672 OI = II->operands_begin(), OE = II->operands_end();
8674 if (!OI->isReg())
continue;
8675 DefRegs[OI->getReg()] =
true;
8680 for (
unsigned i = 0; SavedRegs[i] != 0; ++i) {
8681 unsigned Reg = SavedRegs[i];
8683 !ARM::tGPRRegClass.contains(Reg) &&
8684 !ARM::hGPRRegClass.contains(Reg))
8686 if (Subtarget->
isThumb1Only() && !ARM::tGPRRegClass.contains(Reg))
8688 if (!Subtarget->
isThumb() && !ARM::GPRRegClass.contains(Reg))
8702 (*I)->setIsEHPad(
false);
8719 static unsigned getLdOpcode(
unsigned LdSize,
bool IsThumb1,
bool IsThumb2) {
8721 return LdSize == 16 ? ARM::VLD1q32wb_fixed
8722 : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0;
8724 return LdSize == 4 ? ARM::tLDRi
8725 : LdSize == 2 ? ARM::tLDRHi
8726 : LdSize == 1 ? ARM::tLDRBi : 0;
8728 return LdSize == 4 ? ARM::t2LDR_POST
8729 : LdSize == 2 ? ARM::t2LDRH_POST
8730 : LdSize == 1 ? ARM::t2LDRB_POST : 0;
8731 return LdSize == 4 ? ARM::LDR_POST_IMM
8732 : LdSize == 2 ? ARM::LDRH_POST
8733 : LdSize == 1 ? ARM::LDRB_POST_IMM : 0;
8738 static unsigned getStOpcode(
unsigned StSize,
bool IsThumb1,
bool IsThumb2) {
8740 return StSize == 16 ? ARM::VST1q32wb_fixed
8741 : StSize == 8 ? ARM::VST1d32wb_fixed : 0;
8743 return StSize == 4 ? ARM::tSTRi
8744 : StSize == 2 ? ARM::tSTRHi
8745 : StSize == 1 ? ARM::tSTRBi : 0;
8747 return StSize == 4 ? ARM::t2STR_POST
8748 : StSize == 2 ? ARM::t2STRH_POST
8749 : StSize == 1 ? ARM::t2STRB_POST : 0;
8750 return StSize == 4 ? ARM::STR_POST_IMM
8751 : StSize == 2 ? ARM::STRH_POST
8752 : StSize == 1 ? ARM::STRB_POST_IMM : 0;
8759 unsigned LdSize,
unsigned Data,
unsigned AddrIn,
8760 unsigned AddrOut,
bool IsThumb1,
bool IsThumb2) {
8761 unsigned LdOpc =
getLdOpcode(LdSize, IsThumb1, IsThumb2);
8762 assert(LdOpc != 0 &&
"Should have a load opcode");
8769 }
else if (IsThumb1) {
8775 BuildMI(*BB, Pos, dl, TII->
get(ARM::tADDi8), AddrOut)
8780 }
else if (IsThumb2) {
8800 unsigned StSize,
unsigned Data,
unsigned AddrIn,
8801 unsigned AddrOut,
bool IsThumb1,
bool IsThumb2) {
8802 unsigned StOpc =
getStOpcode(StSize, IsThumb1, IsThumb2);
8803 assert(StOpc != 0 &&
"Should have a store opcode");
8805 BuildMI(*BB, Pos, dl, TII->
get(StOpc), AddrOut)
8810 }
else if (IsThumb1) {
8817 BuildMI(*BB, Pos, dl, TII->
get(ARM::tADDi8), AddrOut)
8822 }
else if (IsThumb2) {
8823 BuildMI(*BB, Pos, dl, TII->
get(StOpc), AddrOut)
8829 BuildMI(*BB, Pos, dl, TII->
get(StOpc), AddrOut)
8856 unsigned UnitSize = 0;
8861 bool IsThumb2 = Subtarget->
isThumb2();
8862 bool IsThumb = Subtarget->
isThumb();
8866 }
else if (Align & 2) {
8872 if ((Align % 16 == 0) && SizeVal >= 16)
8874 else if ((Align % 8 == 0) && SizeVal >= 8)
8883 bool IsNeon = UnitSize >= 8;
8884 TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
8886 VecTRC = UnitSize == 16 ? &ARM::DPairRegClass
8887 : UnitSize == 8 ? &ARM::DPRRegClass
8890 unsigned BytesLeft = SizeVal % UnitSize;
8891 unsigned LoopSize = SizeVal - BytesLeft;
8893 if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) {
8897 unsigned srcIn = src;
8898 unsigned destIn = dest;
8899 for (
unsigned i = 0; i < LoopSize; i+=UnitSize) {
8903 emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut,
8904 IsThumb1, IsThumb2);
8905 emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut,
8906 IsThumb1, IsThumb2);
8914 for (
unsigned i = 0; i < BytesLeft; i++) {
8918 emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut,
8919 IsThumb1, IsThumb2);
8920 emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut,
8921 IsThumb1, IsThumb2);
8961 if (Subtarget->
useMovt(*MF)) {
8962 unsigned Vtmp = varEnd;
8963 if ((LoopSize & 0xFFFF0000) != 0)
8965 BuildMI(BB, dl, TII->
get(IsThumb ? ARM::t2MOVi16 : ARM::MOVi16), Vtmp)
8966 .addImm(LoopSize & 0xFFFF)
8969 if ((LoopSize & 0xFFFF0000) != 0)
8970 BuildMI(BB, dl, TII->
get(IsThumb ? ARM::t2MOVTi16 : ARM::MOVTi16), varEnd)
9013 .addReg(varLoop).
addMBB(loopMBB)
9016 .addReg(srcLoop).
addMBB(loopMBB)
9019 .addReg(destLoop).
addMBB(loopMBB)
9026 IsThumb1, IsThumb2);
9028 IsThumb1, IsThumb2);
9040 TII->
get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
9049 TII->
get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc))
9058 auto StartOfExit = exitMBB->
begin();
9062 unsigned srcIn = srcLoop;
9063 unsigned destIn = destLoop;
9064 for (
unsigned i = 0; i < BytesLeft; i++) {
9068 emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut,
9069 IsThumb1, IsThumb2);
9070 emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut,
9071 IsThumb1, IsThumb2);
9081 ARMTargetLowering::EmitLowered__chkstk(
MachineInstr &MI,
9088 "__chkstk is only supported on Windows");
9089 assert(Subtarget->
isThumb2() &&
"Windows on ARM requires Thumb-2 mode");
9117 .addExternalSymbol(
"__chkstk")
9130 .addExternalSymbol(
"__chkstk");
9144 BuildMI(*MBB, MI, DL, TII.
get(ARM::t2SUBrr), ARM::SP)
9156 ARMTargetLowering::EmitLowered__dbzchk(
MachineInstr &MI,
9170 BuildMI(TrapBB, DL, TII->
get(ARM::t__brkdiv0));
9207 if (miI == BB->
end()) {
9210 sItr != sEnd; ++sItr) {
9219 SelectItr->addRegisterKilled(ARM::CPSR, TRI);
9228 bool isThumb2 = Subtarget->
isThumb2();
9236 case ARM::tLDR_postidx: {
9238 BuildMI(*BB, MI, dl, TII->
get(ARM::tLDMIA_UPD))
9251 case ARM::t2STR_preidx:
9254 case ARM::t2STRB_preidx:
9257 case ARM::t2STRH_preidx:
9261 case ARM::STRi_preidx:
9262 case ARM::STRBi_preidx: {
9263 unsigned NewOpc = MI.
getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM
9264 : ARM::STRB_PRE_IMM;
9280 .addMemOperand(MMO);
9284 case ARM::STRr_preidx:
9285 case ARM::STRBr_preidx:
9286 case ARM::STRH_preidx: {
9290 case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG;
break;
9291 case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG;
break;
9292 case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE;
break;
9301 case ARM::tMOVCCr_pseudo: {
9366 case ARM::BCCZi64: {
9372 bool RHSisZero = MI.
getOpcode() == ARM::BCCZi64;
9377 BuildMI(BB, dl, TII->
get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
9381 BuildMI(BB, dl, TII->
get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
9387 BuildMI(BB, dl, TII->
get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
9391 BuildMI(BB, dl, TII->
get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
9392 .addReg(LHS2).
addReg(RHS2)
9401 BuildMI(BB, dl, TII->
get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
9414 case ARM::Int_eh_sjlj_setjmp:
9415 case ARM::Int_eh_sjlj_setjmp_nofp:
9416 case ARM::tInt_eh_sjlj_setjmp:
9417 case ARM::t2Int_eh_sjlj_setjmp:
9418 case ARM::t2Int_eh_sjlj_setjmp_nofp:
9421 case ARM::Int_eh_sjlj_setup_dispatch:
9422 EmitSjLjDispatchBlock(MI, BB);
9450 bool isThumb2 = Subtarget->
isThumb2();
9454 unsigned NewRsbDstReg =
9469 BuildMI(BB, dl, TII->
get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
9476 TII->
get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB)
9483 TII->
get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg)
9492 TII->
get(ARM::PHI), ABSDstReg)
9493 .addReg(NewRsbDstReg).
addMBB(RSBBB)
9502 case ARM::COPY_STRUCT_BYVAL_I32:
9504 return EmitStructByval(MI, BB);
9506 return EmitLowered__chkstk(MI, BB);
9508 return EmitLowered__dbzchk(MI, BB);
9535 : &ARM::GPRRegClass);
9560 MCID = &TII->get(NewOpc);
9564 &&
"converted opcode should be the same except for cc_out" 9565 " (and, on Thumb1, pred)");
9600 assert(!NewOpc &&
"Optional cc_out operand required");
9605 bool definesCPSR =
false;
9606 bool deadCPSR =
false;
9619 assert(!NewOpc &&
"Optional cc_out operand required");
9625 "expect uninitialized optional cc_out operand");
9664 default:
return false;
9735 bool AllOnes =
false) {
9742 NonConstantVal, DAG))
9748 OtherOp, NonConstantVal);
9754 CCOp, TrueVal, FalseVal);
9891 unsigned nextIndex = 0;
9914 || C1->getZExtValue() != nextIndex+1)
9959 return DAG.
getNode(ExtOp, dl, VT, tmp);
9973 if (!Subtarget->
hasDSP())
9999 if (
auto Const = dyn_cast<ConstantSDNode>(SRA.
getOperand(1))) {
10000 if (Const->getZExtValue() != 31)
10009 SDLoc dl(AddcNode);
10010 unsigned Opcode = 0;
10045 SDValue resNode(AddcNode, 0);
10074 "Expect an ADDE or SUBE");
10078 "ADDE node has the wrong inputs");
10095 assert(AddcSubcNode->getNumValues() == 2 &&
10096 AddcSubcNode->getValueType(0) ==
MVT::i32 &&
10097 "Expect ADDC with two result values. First: i32");
10117 bool IsLeftOperandMUL =
false;
10122 IsLeftOperandMUL =
true;
10133 SDValue *LowAddSub =
nullptr;
10136 if ((AddeSubeOp0 != MULOp.
getValue(1)) && (AddeSubeOp1 != MULOp.
getValue(1)))
10139 if (IsLeftOperandMUL)
10140 HiAddSub = &AddeSubeOp1;
10142 HiAddSub = &AddeSubeOp0;
10147 if (AddcSubcOp0 == MULOp.
getValue(0)) {
10148 LoMul = &AddcSubcOp0;
10149 LowAddSub = &AddcSubcOp1;
10151 if (AddcSubcOp1 == MULOp.
getValue(0)) {
10152 LoMul = &AddcSubcOp1;
10153 LowAddSub = &AddcSubcOp0;
10161 if (AddcSubcNode == HiAddSub->getNode() ||
10180 static_cast<ConstantSDNode *
>(LowAddSub->getNode())->getZExtValue() ==
10191 return SDValue(AddeSubeNode, 0);
10212 return SDValue(AddeSubeNode, 0);
10233 SDNode *UmlalNode =
nullptr;
10311 int32_t imm =
C->getSExtValue();
10312 if (imm < 0 && imm > std::numeric_limits<int>::min()) {
10332 int64_t imm =
C->getSExtValue();
10452 for (
auto U : N->
uses()) {
10453 switch(U->getOpcode()) {
10466 if (isa<ConstantSDNode>(U->getOperand(0)) ||
10467 isa<ConstantSDNode>(U->getOperand(1)))
10471 if (U->getOperand(0).getOpcode() ==
ISD::SHL ||
10472 U->getOperand(1).getOpcode() ==
ISD::SHL)
10489 if (!C1ShlC2 || !C2)
10492 APInt C2Int = C2->getAPIntValue();
10493 APInt C1Int = C1ShlC2->getAPIntValue();
10498 if ((C1Int & Mask) != C1Int)
10505 auto LargeImm = [](
const APInt &Imm) {
10506 unsigned Zeros = Imm.countLeadingZeros() + Imm.countTrailingZeros();
10507 return Imm.getBitWidth() - Zeros > 8;
10510 if (LargeImm(C1Int) || LargeImm(C2Int))
10604 return DAG.
getNode(Opcode, DL, VT,
10631 unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt);
10633 ShiftAmt = ShiftAmt & (32 - 1);
10638 MulAmt >>= ShiftAmt;
10660 uint64_t MulAmtAbs = -MulAmt;
10708 if (C1 == 255 || C1 == 65535)
10725 if (!C2 || C2 >= 32)
10769 if (Trailing == C2 && C2 + C3 < 32) {
10782 if (Leading == C2 && C2 + C3 < 32) {
10808 APInt SplatBits, SplatUndef;
10809 unsigned SplatBitSize;
10812 BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
10813 if (SplatBitSize <= 64) {
10890 unsigned Opcode = 0;
10891 if (
isS16(OpS16, DAG))
10940 if (Mask == 0xffff)
10947 if ((Val & ~Mask) != Val)
10972 (Mask == ~Mask2)) {
10975 if (Subtarget->
hasDSP() &&
10976 (Mask == 0xffff || Mask == 0xffff0000))
10989 (~Mask == Mask2)) {
10992 if (Subtarget->
hasDSP() &&
10993 (Mask2 == 0xffff || Mask2 == 0xffff0000))
11014 unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
11044 APInt SplatBits, SplatUndef;
11045 unsigned SplatBitSize;
11047 if (BVN && Subtarget->
hasNEON() &&
11048 BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
11049 if (SplatBitSize <= 64) {
11086 unsigned SplatBitSize;
11089 APInt SplatBits0, SplatBits1;
11093 if (BVN0 && BVN0->
isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
11094 HasAnyUndefs) && !HasAnyUndefs) {
11095 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
11096 HasAnyUndefs) && !HasAnyUndefs) {
11101 SplatBits0 == ~SplatBits1) {
11156 ToMask = ~cast<ConstantSDNode>(N->
getOperand(2))->getAPIntValue();
11161 if (From->getOpcode() ==
ISD::SRL &&
11162 isa<ConstantSDNode>(From->getOperand(1))) {
11163 APInt Shift = cast<ConstantSDNode>(From->getOperand(1))->getAPIntValue();
11166 From = From->getOperand(0);
11178 return LastActiveBitInA - 1 == FirstActiveBitInB;
11184 APInt ToMask, FromMask;
11192 APInt CombinedToMask = ToMask;
11194 APInt NewToMask, NewFromMask;
11196 if (NewFrom != From) {
11198 CombinedToMask |= NewToMask;
11204 if ((NewToMask & CombinedToMask).getBoolValue())
11217 CombinedToMask |= NewToMask;
11234 unsigned InvMask = cast<ConstantSDNode>(N->
getOperand(2))->getZExtValue();
11238 static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
11239 "undefined behavior");
11240 unsigned Mask = (1u << Width) - 1;
11242 if ((Mask & (~Mask2)) == 0)
11255 APInt ToMask1, FromMask1;
11258 APInt ToMask2, FromMask2;
11266 APInt NewFromMask = FromMask1 | FromMask2;
11267 APInt NewToMask = ToMask1 | ToMask2;
11272 if (NewFromMask[0] == 0)
11309 DAG.getConstant(4, DL,
MVT::i32));
11310 SDValue NewLD2 = DAG.getLoad(
11348 for (
unsigned i = 0; i < NumElts; ++i) {
11378 for (
unsigned i = 0; i < NumElts; ++i) {
11425 unsigned NumOfBitCastedElts = 0;
11427 unsigned NumOfRelevantElts = NumElts;
11428 for (
unsigned Idx = 0; Idx < NumElts; ++Idx) {
11433 ++NumOfBitCastedElts;
11434 }
else if (Elt.
isUndef() || isa<ConstantSDNode>(Elt))
11437 --NumOfRelevantElts;
11441 if (NumOfBitCastedElts <= NumOfRelevantElts / 2)
11449 if (!TLI.isTypeLegal(VecVT))
11459 for (
unsigned Idx = 0 ; Idx < NumElts; ++Idx) {
11544 unsigned HalfElts = NumElts/2;
11546 for (
unsigned n = 0; n < NumElts; ++n) {
11549 if (MaskElt < (
int)HalfElts)
11551 else if (MaskElt >= (
int)NumElts && MaskElt < (
int)(NumElts + HalfElts))
11552 NewElt = HalfElts + MaskElt - NumElts;
11570 const unsigned AddrOpIdx = ((isIntrinsic ||
isStore) ? 2 : 1);
11580 UI.getUse().getResNo() != Addr.
getResNo())
11596 bool isLoadOp =
true;
11597 bool isLaneOp =
false;
11598 unsigned NewOpc = 0;
11599 unsigned NumVecs = 0;
11601 unsigned IntNo = cast<ConstantSDNode>(N->
getOperand(1))->getZExtValue();
11605 NumVecs = 1;
break;
11607 NumVecs = 2;
break;
11609 NumVecs = 3;
break;
11611 NumVecs = 4;
break;
11619 NumVecs = 2; isLaneOp =
true;
break;
11621 NumVecs = 3; isLaneOp =
true;
break;
11623 NumVecs = 4; isLaneOp =
true;
break;
11625 NumVecs = 1; isLoadOp =
false;
break;
11627 NumVecs = 2; isLoadOp =
false;
break;
11629 NumVecs = 3; isLoadOp =
false;
break;
11631 NumVecs = 4; isLoadOp =
false;
break;
11633 NumVecs = 2; isLoadOp =
false; isLaneOp =
true;
break;
11635 NumVecs = 3; isLoadOp =
false; isLaneOp =
true;
break;
11637 NumVecs = 4; isLoadOp =
false; isLaneOp =
true;
break;
11648 NumVecs = 1; isLaneOp =
false;
break;
11650 NumVecs = 1; isLaneOp =
false; isLoadOp =
false;
break;
11658 }
else if (isIntrinsic) {
11661 assert(isStore &&
"Node has to be a load, a store, or an intrinsic!");
11672 if (NumBytes >= 3 * 16 && (!CInc || CInc->
getZExtValue() != NumBytes)) {
11681 EVT AlignedVecTy = VecTy;
11698 if (isa<LSBaseSDNode>(N)) {
11699 if (Alignment == 0)
11703 assert(NumVecs == 1 &&
"Unexpected multi-element generic load/store.");
11704 assert(!isLaneOp &&
"Unexpected generic load/store lane.");
11721 unsigned NumResultVecs = (isLoadOp ? NumVecs : 0);
11723 for (n = 0; n < NumResultVecs; ++n)
11724 Tys[n] = AlignedVecTy;
11735 if (
StoreSDNode *StN = dyn_cast<StoreSDNode>(N)) {
11741 for (
unsigned i = AddrOpIdx + 1; i < N->
getNumOperands() - 1; ++i)
11761 for (
unsigned i = 0; i < NumResultVecs; ++i)
11767 SDValue &LdVal = NewResults[0];
11803 unsigned NumVecs = 0;
11804 unsigned NewOpc = 0;
11805 unsigned IntNo = cast<ConstantSDNode>(VLD->
getOperand(1))->getZExtValue();
11821 unsigned VLDLaneNo =
11822 cast<ConstantSDNode>(VLD->
getOperand(NumVecs+3))->getZExtValue();
11826 if (UI.getUse().getResNo() == NumVecs)
11830 VLDLaneNo != cast<ConstantSDNode>(User->
getOperand(1))->getZExtValue())
11837 for (n = 0; n < NumVecs; ++n)
11850 unsigned ResNo = UI.getUse().
getResNo();
11852 if (ResNo == NumVecs)
11860 std::vector<SDValue> VLDDupResults;
11861 for (
unsigned n = 0; n < NumVecs; ++n)
11862 VLDDupResults.push_back(
SDValue(VLDDup.getNode(), n));
11863 VLDDupResults.push_back(
SDValue(VLDDup.getNode(), NumVecs));
11890 unsigned Imm = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
11956 assert(StVT != VT &&
"Cannot truncate to the same type");
11965 if (0 != (NumElems * FromEltSz) % ToEltSz)
return SDValue();
11967 unsigned SizeRatio = FromEltSz / ToEltSz;
11972 NumElems*SizeRatio);
11978 for (
unsigned i = 0; i < NumElems; ++i)
11980 ? (i + 1) * SizeRatio - 1
11995 if (TLI.
isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz)
12014 for (
unsigned I = 0;
I <
E;
I++) {
12016 StoreType, ShuffWide,
12105 if (!isa<BuildVectorSDNode>(ConstVec))
12113 if (FloatBits != 32 || IntBits > 32 || NumLanes > 4) {
12124 if (C == -1 || C == 0 || C > 32)
12136 if (IntBits < FloatBits)
12163 if (!isa<BuildVectorSDNode>(ConstVec))
12171 if (FloatBits != 32 || IntBits > 32 || NumLanes > 4) {
12182 if (C == -1 || C == 0 || C > 32)
12188 if (IntBits < FloatBits)
12209 APInt SplatBits, SplatUndef;
12210 unsigned SplatBitSize;
12212 if (! BVN || ! BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
12213 HasAnyUndefs, ElementBits) ||
12214 SplatBitSize > ElementBits)
12225 assert(VT.
isVector() &&
"vector shift count is not a vector type");
12229 return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
12240 assert(VT.
isVector() &&
"vector shift count is not a vector type");
12245 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits));
12246 if (Cnt >= -(isNarrow ? ElementBits/2 : ElementBits) && Cnt <= -1) {
12255 unsigned IntNo = cast<ConstantSDNode>(N->
getOperand(0))->getZExtValue();
12282 unsigned VShiftOpc = 0;
12371 unsigned VShiftOpc = 0;
12469 isa<ConstantSDNode>(Lane)) {
12513 auto CCNode = cast<ConstantSDNode>(CMOV->
getOperand(2));
12514 auto CC = CCNode->getAPIntValue().getLimitedValue();
12551 unsigned Heuristic = Subtarget->
isThumb() ? 3 : 2;
12558 if ((OrCI & Known.
Zero) != OrCI)
12565 unsigned BitInX = AndC->
logBase2();
12573 for (
unsigned BitInY = 0, NumActiveBits = OrCI.
getActiveBits();
12574 BitInY < NumActiveBits; ++BitInY) {
12575 if (OrCI[BitInY] == 0)
12578 Mask.setBit(BitInY);
12614 if ((LHS00C && LHS00C->getZExtValue() == 0) &&
12615 (LHS01C && LHS01C->getZExtValue() == 1) &&
12616 (LHS1C && LHS1C->getZExtValue() == 1) &&
12617 (RHSC && RHSC->getZExtValue() == 0)) {
12670 if (CC ==
ARMCC::NE && FalseVal == RHS && FalseVal != LHS) {
12673 }
else if (CC ==
ARMCC::EQ && TrueVal == RHS) {
12686 if ((LHS0C && LHS0C->getZExtValue() == 0) &&
12687 (LHS1C && LHS1C->getZExtValue() == 1) &&
12688 (RHSC && RHSC->getZExtValue() == 0)) {
12765 const APInt *TrueConst;
12771 unsigned ShiftAmount = TrueConst->
logBase2();
12789 if (Known.
Zero == 0xfffffffe)
12792 else if (Known.
Zero == 0xffffff00)
12795 else if (Known.
Zero == 0xffff0000)
12901 switch (cast<ConstantSDNode>(N->
getOperand(1))->getZExtValue()) {
12941 bool *
Fast)
const {
12956 if (AllowsUnaligned) {
12968 if (Subtarget->
hasNEON() && (AllowsUnaligned || Subtarget->
isLittle())) {
12979 unsigned AlignCheck) {
12980 return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) &&
12981 (DstAlign == 0 || DstAlign % AlignCheck == 0));
12985 unsigned DstAlign,
unsigned SrcAlign,
12986 bool IsMemset,
bool ZeroMemset,
12992 if ((!IsMemset || ZeroMemset) && Subtarget->
hasNEON() &&
12999 }
else if (Size >= 8 &&
13019 return (SrcBits == 64 && DestBits == 32);
13028 return (SrcBits == 64 && DestBits == 32);
13108 unsigned AS)
const {
13111 return AM.
Scale < 0 ? 1 : 0;
13121 unsigned Scale = 1;
13123 default:
return false;
13138 if ((V & (Scale - 1)) != 0)
13141 return V == (V & ((1LL << 5) - 1));
13146 bool isNeg =
false;
13153 default:
return false;
13160 return V == (V & ((1LL << 8) - 1));
13161 return V == (V & ((1LL << 12) - 1));
13170 return V == (V & ((1LL << 8) - 1));
13194 default:
return false;
13199 return V == (V & ((1LL << 12) - 1));
13202 return V == (V & ((1LL << 8) - 1));
13210 return V == (V & ((1LL << 8) - 1));
13216 int Scale = AM.
Scale;
13221 default:
return false;
13229 Scale = Scale & ~1;
13230 return Scale == 2 || Scale == 4 || Scale == 8;
13247 if (Scale & 1)
return false;
13254 const int Scale = AM.
Scale;
13264 return (Scale == 1) || (!AM.
HasBaseReg && Scale == 2);
13280 switch (AM.
Scale) {
13297 int Scale = AM.
Scale;
13299 default:
return false;
13303 if (Scale < 0) Scale = -Scale;
13311 if (Scale == 1 || (AM.
HasBaseReg && Scale == -1))
13324 if (Scale & 1)
return false;
13344 return Imm >= 0 && Imm <= 255;
13359 return AbsImm >= 0 && AbsImm <= 255;
13373 int RHSC = (int)RHS->getZExtValue();
13374 if (RHSC < 0 && RHSC > -256) {
13387 int RHSC = (int)RHS->getZExtValue();
13388 if (RHSC < 0 && RHSC > -0x1000) {
13430 int RHSC = (int)RHS->getZExtValue();
13431 if (RHSC < 0 && RHSC > -0x100) {
13436 }
else if (RHSC > 0 && RHSC < 0x100) {
13461 Ptr =
LD->getBasePtr();
13462 VT =
LD->getMemoryVT();
13464 }
else if (
StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
13465 Ptr = ST->getBasePtr();
13466 VT = ST->getMemoryVT();
13471 bool isLegal =
false;
13497 VT =
LD->getMemoryVT();
13498 Ptr =
LD->getBasePtr();
13501 }
else if (
StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
13502 VT = ST->getMemoryVT();
13503 Ptr = ST->getBasePtr();
13504 isNonExt = !ST->isTruncatingStore();
13515 if (!RHS || RHS->getZExtValue() != 4)
13525 bool isLegal =
false;
13553 const APInt &DemandedElts,
13555 unsigned Depth)
const {
13584 Known.
One &= KnownRHS.
One;
13594 EVT VT = cast<MemIntrinsicSDNode>(
Op)->getMemoryVT();
13622 "VGETLANE index out of bounds");
13633 Known = Known.
sext(DstSz);
13635 Known = Known.
zext(DstSz);
13646 const APInt &DemandedAPInt,
13673 unsigned ShrunkMask = Mask & Demanded;
13674 unsigned ExpandedMask = Mask | ~Demanded;
13678 if (ShrunkMask == 0)
13684 if (ExpandedMask == ~0U)
13687 auto IsLegalMask = [ShrunkMask, ExpandedMask](
unsigned Mask) ->
bool {
13688 return (ShrunkMask & Mask) == ShrunkMask && (~ExpandedMask &
Mask) == 0;
13690 auto UseMask = [
Mask,
Op, VT, &TLO](
unsigned NewMask) ->
bool {
13691 if (NewMask == Mask)
13700 if (IsLegalMask(0xFF))
13701 return UseMask(0xFF);
13704 if (IsLegalMask(0xFFFF))
13705 return UseMask(0xFFFF);
13709 if (ShrunkMask < 256)
13710 return UseMask(ShrunkMask);
13714 if ((
int)ExpandedMask <= -2 && (
int)ExpandedMask >= -256)
13715 return UseMask(ExpandedMask);
13742 switch (AsmPieces.
size()) {
13743 default:
return false;
13745 AsmStr = AsmPieces[0];
13750 if (AsmPieces.
size() == 3 &&
13751 AsmPieces[0] ==
"rev" && AsmPieces[1] ==
"$0" && AsmPieces[2] ==
"$1" &&
13787 if (Constraint.
size() == 1) {
13788 switch (Constraint[0]) {
13800 }
else if (Constraint.
size() == 2) {
13801 switch (Constraint[0]) {
13820 if (!CallOperandVal)
13824 switch (*constraint) {
13844 using RCPair = std::pair<unsigned, const TargetRegisterClass *>;
13848 if (Constraint.
size() == 1) {
13850 switch (Constraint[0]) {
13853 return RCPair(0U, &ARM::tGPRRegClass);
13854 return RCPair(0U, &ARM::GPRRegClass);
13857 return RCPair(0U, &ARM::hGPRRegClass);
13861 return RCPair(0U, &ARM::tGPRRegClass);
13862 return RCPair(0U, &ARM::GPRRegClass);
13867 return RCPair(0U, &ARM::SPRRegClass);
13869 return RCPair(0U, &ARM::DPRRegClass);
13871 return RCPair(0U, &ARM::QPRRegClass);
13877 return RCPair(0U, &ARM::SPR_8RegClass);
13879 return RCPair(0U, &ARM::DPR_8RegClass);
13881 return RCPair(0U, &ARM::QPR_8RegClass);
13887 return RCPair(0U, &ARM::SPRRegClass);
13889 return RCPair(0U, &ARM::DPR_VFP2RegClass);
13891 return RCPair(0U, &ARM::QPR_VFP2RegClass);
13896 return std::make_pair(
unsigned(ARM::CPSR), &ARM::CCRRegClass);
13904 std::string &Constraint,
13905 std::vector<SDValue>&Ops,
13910 if (Constraint.length() != 1)
return;
13912 char ConstraintLetter = Constraint[0];
13913 switch (ConstraintLetter) {
13916 case 'I':
case 'J':
case 'K':
case 'L':
13917 case 'M':
case 'N':
case 'O':
13923 int CVal = (int) CVal64;
13926 if (CVal != CVal64)
13929 switch (ConstraintLetter) {
13934 if (CVal >= 0 && CVal <= 65535)
13941 if (CVal >= 0 && CVal <= 255)
13943 }
else if (Subtarget->
isThumb2()) {
13962 if (CVal >= -255 && CVal <= -1)
13968 if (CVal >= -4095 && CVal <= 4095)
13981 }
else if (Subtarget->
isThumb2()) {
14004 if (CVal >= -7 && CVal < 7)
14006 }
else if (Subtarget->
isThumb2()) {
14029 if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
14035 if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
14043 if (CVal >= 0 && CVal <= 31)
14052 if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
14062 Ops.push_back(Result);
14072 "Unhandled Opcode in getDivRemLibcall");
14078 case MVT::i8: LC = isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8;
break;
14079 case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16;
break;
14080 case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;
break;
14081 case MVT::i64: LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64;
break;
14090 "Unhandled Opcode in getDivRemArgList");
14094 TargetLowering::ArgListEntry Entry;
14100 Entry.IsSExt = isSigned;
14101 Entry.IsZExt = !isSigned;
14102 Args.push_back(Entry);
14113 "Register-based DivRem lowering only");
14116 "Invalid opcode for Div/Rem lowering");
14134 SDValue Div = DAG.
getNode(DivOpcode, dl, VT, Dividend, Divisor);
14138 SDValue Values[2] = {Div, Rem};
14164 return CallInfo.first;
14171 std::vector<Type*> RetTyParams;
14172 Type *RetTyElement;
14182 RetTyParams.push_back(RetTyElement);
14183 RetTyParams.push_back(RetTyElement);
14200 CallLoweringInfo CLI(DAG);
14201 CLI.setChain(InChain)
14203 .setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(
SDLoc(N));
14204 std::pair<SDValue, SDValue> CallResult =
LowerCallTo(CLI);
14207 SDNode *ResNode = CallResult.first.getNode();
14222 "no-stack-arg-probe")) {
14223 unsigned Align = cast<ConstantSDNode>(Op.
getOperand(2))->getZExtValue();
14231 SDValue Ops[2] = { SP, Chain };
14248 SDValue Ops[2] = { NewSP, Chain };
14254 "Unexpected type for custom-lowering FP_EXTEND");
14267 "Unexpected type for custom-lowering FP_ROUND");
14284 if (v == 0xffffffff)
14313 unsigned Intrinsic)
const {
14314 switch (Intrinsic) {
14333 Info.
align = cast<ConstantInt>(AlignArg)->getZExtValue();
14363 unsigned NumElts = 0;
14368 NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
14374 Info.
align = cast<ConstantInt>(AlignArg)->getZExtValue();
14385 unsigned NumElts = 0;
14390 NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
14458 if (Bits == 0 || Bits > 32)
14464 unsigned Index)
const {
14550 return (Size == 64) && !Subtarget->
isMClass();
14573 return (Size <= (Subtarget->
isMClass() ? 32U : 64U) && hasAtomicRMW)
14585 bool HasAtomicCmpXchg =
14594 return InsertFencesForAtomic;
14603 unsigned &Cost)
const {
14617 if (!isa<ConstantInt>(Idx))
14621 unsigned BitWidth = cast<VectorType>(VectorTy)->
getBitWidth();
14624 if (BitWidth == 64 || BitWidth == 128) {
14642 Type *ValTy = cast<PointerType>(Addr->
getType())->getElementType();
14672 cast<PointerType>(Addr->
getType())->getElementType());
14746 if (ElSize != 8 && ElSize != 16 && ElSize != 32)
14751 return VecSize == 64 || VecSize % 128 == 0;
14769 "Invalid interleave factor");
14770 assert(!Shuffles.
empty() &&
"Empty shufflevector input");
14772 "Unmatched number of shufflevectors and indices");
14798 if (NumLoads > 1) {
14802 VecTy->getVectorNumElements() / NumLoads);
14808 BaseAddr, VecTy->getVectorElementType()->getPointerTo(
14815 Type *Tys[] = {VecTy, Int8Ptr};
14827 for (
unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) {
14832 BaseAddr, VecTy->getVectorNumElements() * Factor);
14842 for (
unsigned i = 0; i < Shuffles.
size(); i++) {
14844 unsigned Index = Indices[i];
14854 SubVecs[SV].push_back(SubVec);
14863 auto &SubVec = SubVecs[SVI];
14866 SVI->replaceAllUsesWith(WideVec);
14900 unsigned Factor)
const {
14902 "Invalid interleave factor");
14906 "Invalid interleaved store");
14943 if (NumStores > 1) {
14946 LaneLen /= NumStores;
14962 Type *Tys[] = {Int8Ptr, SubVecTy};
14967 for (
unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) {
14970 if (StoreCount > 0)
14980 for (
unsigned i = 0; i < Factor; i++) {
14981 unsigned IdxI = StoreCount * LaneLen * Factor + i;
14982 if (
Mask[IdxI] >= 0) {
14986 unsigned StartMask = 0;
14987 for (
unsigned j = 1; j < LaneLen; j++) {
14988 unsigned IdxJ = StoreCount * LaneLen * Factor + j;
14989 if (
Mask[IdxJ * Factor + IdxI] >= 0) {
14990 StartMask =
Mask[IdxJ * Factor + IdxI] - IdxJ;
15020 uint64_t &Members) {
15021 if (
auto *ST = dyn_cast<StructType>(Ty)) {
15022 for (
unsigned i = 0; i < ST->getNumElements(); ++i) {
15023 uint64_t SubMembers = 0;
15026 Members += SubMembers;
15028 }
else if (
auto *AT = dyn_cast<ArrayType>(Ty)) {
15029 uint64_t SubMembers = 0;
15032 Members += SubMembers * AT->getNumElements();
15043 }
else if (
auto *VT = dyn_cast<VectorType>(Ty)) {
15050 return VT->getBitWidth() == 64;
15052 return VT->getBitWidth() == 128;
15054 switch (VT->getBitWidth()) {
15067 return (Members > 0 && Members <= 4);
15087 if (getEffectiveCallingConv(CallConv, isVarArg) !=
15092 uint64_t Members = 0;
15097 return IsHA || IsIntArray;
15101 const Constant *PersonalityFn)
const {
15104 return Subtarget->
useSjLjEH() ? ARM::NoRegister : ARM::R0;
15108 const Constant *PersonalityFn)
const {
15111 return Subtarget->
useSjLjEH() ? ARM::NoRegister : ARM::R1;
15120 void ARMTargetLowering::insertCopiesSplitCSR(
15134 RC = &ARM::GPRRegClass;
15135 else if (ARM::DPRRegClass.
contains(*
I))
15136 RC = &ARM::DPRRegClass;
15148 "Function should be nounwind in insertCopiesSplitCSR!");
15154 for (
auto *Exit : Exits)
15156 TII->
get(TargetOpcode::COPY), *
I)
virtual std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const
Return the largest legal super-reg register class of the register class for the specified type and it...
static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG)
PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG)
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG)
std::pair< Value *, Value * > ShuffleOps
We are building a shuffle to create V, which is a sequence of insertelement, extractelement pairs...
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
bool isMachineConstantPoolEntry() const
Type * getVectorElementType() const
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
void setFrameAddressIsTaken(bool T)
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Value * getValueOperand()
MO_DLLIMPORT - On a symbol operand, this represents that the reference to the symbol is for an import...
static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC, SelectionDAG &DAG)
BC is a bitcast that is about to be turned into a VMOVDRR.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set, or Regs.size() if they are all allocated.
static MVT getIntegerVT(unsigned BitWidth)
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
const MachineInstrBuilder & add(const MachineOperand &MO) const
static SDValue ConvertCarryFlagToBooleanCarry(SDValue Flags, EVT VT, SelectionDAG &DAG)
static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG)
A parsed version of the target data layout string in and methods for querying it. ...
static bool isVZIPMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
EVT getValueType() const
Return the ValueType of the referenced return value.
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
bool isInteger() const
Return true if this is an integer or a vector integer type.
bool isTargetGNUAEABI() const
Value * CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const Twine &Name="")
static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
void markGlobalAsPromotedToConstantPool(const GlobalVariable *GV)
Indicate to the backend that GV has had its storage changed to inside a constant pool.
void finalizeLowering(MachineFunction &MF) const override
Execute target specific actions to finalize target lowering.
static SDValue PerformBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformBUILD_VECTORCombine - Target-specific dag combine xforms for ISD::BUILD_VECTOR.
bool hasCallSiteLandingPad(MCSymbol *Sym)
Return true if the landing pad Eh symbol has an associated call site.
static bool isConstant(const MachineInstr &MI)
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt)
isVShiftLImm - Check if this is a valid build_vector for the immediate operand of a vector shift left...
ARM_AAPCS - ARM Architecture Procedure Calling Standard calling convention (aka EABI).
raw_ostream & errs()
This returns a reference to a raw_ostream for standard error.
static MachinePointerInfo getJumpTable(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a jump table entry.
C - The default llvm calling convention, compatible with C.
static const APInt * isPowerOf2Constant(SDValue V)
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand...
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - Return the value type to use for ISD::SETCC.
const GlobalValue * getGlobal() const
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant, which is required to be operand #1) half of the integer or float value specified as operand #0.
uint64_t getZExtValue() const
Get zero extended value.
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode, ARMCC::CondCodes &CondCode2, bool &InvalidOnQNaN)
FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
static APInt getAllOnesValue(unsigned numBits)
Get the all-ones value.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
This class represents an incoming formal argument to a Function.
static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG)
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
MachineBasicBlock * getMBB() const
bool isOSMSVCRT() const
Is this a "Windows" OS targeting a "MSVCRT.dll" environment.
SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd)...
unsigned getFunctionNumber() const
getFunctionNumber - Return a unique ID for the current function.
static ARMConstantPoolSymbol * Create(LLVMContext &C, StringRef s, unsigned ID, unsigned char PCAdj)
static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N, SDValue InChain)
Atomic ordering constants.
int getFunctionContextIndex() const
Return the index for the function context object.
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector...
static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG)
void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const override
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag...
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR (an vector value) starting with the ...
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
BR_CC - Conditional branch.
This class represents lattice values for constants.
static SDValue PerformVMULCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformVMULCombine Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the special multi...
static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
bool hasDivideInThumbMode() const
static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op, SelectionDAG &DAG)
std::pair< unsigned, const TargetRegisterClass * > RCPair
Type * getParamType(unsigned i) const
Parameter type accessors.
const unsigned char * bytes_end() const
int getFP16Imm(const APInt &Imm)
getFP16Imm - Return an 8-bit floating-point version of the 16-bit floating-point value.
static MVT getVectorVT(MVT VT, unsigned NumElements)
static bool IsVUZPShuffleNode(SDNode *N)
TOF
Target Operand Flag enum.
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0...
bool hasVAStart() const
Returns true if the function calls the llvm.va_start intrinsic.
StringRef getPrivateGlobalPrefix() const
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG)
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
LLVM_NODISCARD bool equals_lower(StringRef RHS) const
equals_lower - Check for string equality, ignoring case.
unsigned EnableFastISel
EnableFastISel - This flag enables fast-path instruction selection which trades away generated code q...
A Module instance is used to store all the information related to an LLVM module. ...
const char * LowerXConstraint(EVT ConstraintVT) const override
Try to replace an X constraint, which matches anything, with another that has more specific requireme...
static SDValue PerformADDECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDECombine - Target-specific dag combine transform from ARMISD::ADDC, ARMISD::ADDE, and ISD::MUL_LOHI to MLAL or ARMISD::ADDC, ARMISD::ADDE and ARMISD::UMLAL to ARMISD::UMAAL.
void setIsEHPad(bool V=true)
Indicates the block is a landing pad.
unsigned getNumInterleavedAccesses(VectorType *VecTy, const DataLayout &DL) const
Returns the number of interleaved accesses that will be generated when lowering accesses of the given...
Instruction * emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst, AtomicOrdering Ord) const override
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
an instruction that atomically checks whether a specified value is in a memory location, and, if it is, stores a new value there.
int getSplatIndex() const
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
const SDValue & getBasePtr() const
static SDValue PerformInsertEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
PerformInsertEltCombine - Target-specific dag combine xforms for ISD::INSERT_VECTOR_ELT.
EABI EABIVersion
EABIVersion - This flag specifies the EABI version.
static bool isVTRNMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG &DAG)
SkipLoadExtensionForVMULL - return a load of the original vector size that does not do any sign/zero ...
void push_back(const T &Elt)
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const
PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
ARMConstantPoolValue - ARM specific constantpool value.
static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, bool isSigned)
isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each element has been zero/sign-...
unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
void setIsDef(bool Val=true)
Change a def to a use, or a use to a def.
Describe properties that are true of each instruction in the target description file.
unsigned getReg() const
getReg - Returns the register number.
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override
static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base, uint64_t &Members)
bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPreIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mod...
MachineMemOperand::Flags flags
Y = RRC X, rotate right via carry.
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
getTgtMemIntrinsic - Represent NEON load and store intrinsics as MemIntrinsicNodes.
static bool hasNormalLoadOperand(SDNode *N)
hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node are normal, non-volatile loads.
const SDValue & getValue() const
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
bool isLegalT1ScaledAddressingMode(const AddrMode &AM, EVT VT) const
Returns true if the addresing mode representing by AM is legal for the Thumb1 target, for a load/store of the specified type.
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain...
SDVTList getVTList() const
unsigned getABIAlignmentForCallingConv(Type *ArgTy, DataLayout DL) const override
Return the correct alignment for the current calling convention.
This class represents a function call, abstracting a target machine's calling convention.
EK_Inline - Jump table entries are emitted inline at their point of use.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Get a value with low bits set.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit...
Global Offset Table, Thread Pointer Offset.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change...
static unsigned SelectPairHalf(unsigned Elements, ArrayRef< int > Mask, unsigned Index)
static MVT getFloatingPointVT(unsigned BitWidth)
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
const std::string & getAsmString() const
const SDValue & getChain() const
static bool isLegalT1AddressImmediate(int64_t V, EVT VT)
static cl::opt< bool > ARMInterworking("arm-interworking", cl::Hidden, cl::desc("Enable / disable ARM interworking (for debugging only)"), cl::init(true))
Function Alias Analysis Results
This instruction constructs a fixed permutation of two input vectors.
bool isTargetCOFF() const
unsigned getValNo() const
CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool isVarArg) const
unsigned getAlignment() const
int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG)
SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND, extending load, or BUILD_VECTOR with extended elements, return the unextended value.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, const ARMSubtarget *ST, const SDLoc &dl)
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
auto count_if(R &&Range, UnaryPredicate P) -> typename std::iterator_traits< decltype(adl_begin(Range))>::difference_type
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
bool sgt(const APInt &RHS) const
Signed greather than comparison.
MO_GOT - On a symbol operand, this represents a GOT relative relocation.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
bool hasDLLImportStorageClass() const
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly...
APInt trunc(unsigned width) const
Truncate to new width.
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
Return the register class that should be used for the specified value type.
STATISTIC(NumFunctions, "Total number of functions")
unsigned const TargetRegisterInfo * TRI
static SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes, TargetLowering::DAGCombinerInfo &DCI)
void setIsDead(bool Val=true)
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
An instruction for reading from memory.
static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombineWithOperands - Try DAG combinations for an ADD with operands N0 and N1...
bool isThumb1Only() const
static IntegerType * getInt64Ty(LLVMContext &C)
void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, const SDLoc &DL) const
Soften the operands of a comparison.
bool hasExternalWeakLinkage() const
bool isTargetMuslAEABI() const
[US]{MIN/MAX} - Binary minimum or maximum or signed or unsigned integers.
const SDNodeFlags getFlags() const
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
SDNode * getNode() const
get the SDNode which holds the desired result
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned char TargetFlags=0)
unsigned createNEONModImm(unsigned OpCmode, unsigned Val)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
bool isVectorTy() const
True if this is an instance of VectorType.
bool hasAcquireRelease() const
bool alignLoopsWithOptSize() const override
Should loops be aligned even when the function is marked OptSize (but not MinSize).
static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
Global Offset Table, PC Relative.
Value * CallOperandVal
If this is the result output operand or a clobber, this is null, otherwise it is the incoming operand...
bool isOperationLegalOrCustom(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
static IntegerType * getInt16Ty(LLVMContext &C)
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo) const override
createFastISel - This method returns a target specific FastISel object, or null if the target does no...
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
static SDValue PerformANDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue CombineBaseUpdate(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
CombineBaseUpdate - Target-specific DAG combine function for VLDDUP, NEON load/store intrinsics...
bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override
Return true if it is profitable for dag combiner to transform a floating point op of specified opcode...
static SDValue PerformSUBCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
unsigned getBitWidth() const
Get the bit width of this value.
std::size_t countLeadingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the most significant bit to the least stopping at the first 1...
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
bool lowerInterleavedLoad(LoadInst *LI, ArrayRef< ShuffleVectorInst *> Shuffles, ArrayRef< unsigned > Indices, unsigned Factor) const override
Lower an interleaved load into a vldN intrinsic.
bool isTargetHardFloat() const
uint64_t decodeNEONModImm(unsigned ModImm, unsigned &EltBits)
decodeNEONModImm - Decode a NEON modified immediate value into the element value and the element size...
unsigned getValueSizeInBits() const
Returns the size of the value in bits.
constexpr bool isMask_32(uint32_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit wit...
static bool isThumb(const MCSubtargetInfo &STI)
unsigned createPICLabelUId()
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
Libcall getFPROUND(EVT OpVT, EVT RetVT)
getFPROUND - Return the FPROUND_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
static bool allUsersAreInFunction(const Value *V, const Function *F)
Return true if all users of V are within function F, looking through ConstantExprs.
unsigned getBitWidth() const
Return the number of bits in the APInt.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
SDValue getExternalSymbol(const char *Sym, EVT VT)
return AArch64::GPR64RegClass contains(Reg)
static MachineOperand CreateReg(unsigned Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
bool isTruncatingStore() const
Return true if the op does a truncation before store.
bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const override
Lower an interleaved store into a vstN intrinsic.
MachineJumpTableInfo * getOrCreateJumpTableInfo(unsigned JTEntryKind)
getOrCreateJumpTableInfo - Get the JumpTableInfo for this function, if it does already exist...
bool genExecuteOnly() const
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode *> &Visited, SmallVectorImpl< const SDNode *> &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
Libcall getUINTTOFP(EVT OpVT, EVT RetVT)
getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG)
const ARMBaseInstrInfo * getInstrInfo() const override
bool hasV8MBaselineOps() const
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic...
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Value * getArgOperand(unsigned i) const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
unsigned countTrailingZeros() const
Count the number of trailing zero bits.
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
The address of a basic block.
bool isSEXTLoad(const SDNode *N)
Returns true if the specified node is a SEXTLOAD.
void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
LowerAsmOperandForConstraint - Lower the specified operand into the Ops vector.
const MCPhysReg * getCalleeSavedRegs(const MachineFunction *MF) const override
Code Generation virtual methods...
MO_COFFSTUB - On a symbol operand "FOO", this indicates that the reference is actually to the "...
bool hasOneUse() const
Return true if there is exactly one use of this node.
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool isBeforeLegalize() const
static bool isFloatingPointZero(SDValue Op)
isFloatingPointZero - Return true if this is +0.0.
bool isLegalInterleavedAccessType(VectorType *VecTy, const DataLayout &DL) const
Returns true if VecTy is a legal interleaved access type.
A description of a memory reference used in the backend.
amdgpu Simplify well known AMD library false Value Value const Twine & Name
amdgpu aa AMDGPU Address space based Alias Analysis Wrapper
MO_SBREL - On a symbol operand, this represents a static base relative relocation.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
static SDValue PerformLOADCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
const HexagonInstrInfo * TII
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a vldN-lane (N > 1) intrinsic...
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
bool isFloatingPointTy() const
Return true if this is one of the six floating-point types.
Shift and rotation operations.
static bool isVUZPMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
unsigned getNumOperands() const
Retuns the total number of operands.
static bool isUpperSaturate(const SDValue LHS, const SDValue RHS, const SDValue TrueVal, const SDValue FalseVal, const ISD::CondCode CC, const SDValue K)
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
std::size_t countTrailingOnes(T Value, ZeroBehavior ZB=ZB_Width)
Count the number of ones from the least significant bit to the first zero bit.
LLVMContext & getContext() const
Get the global data context.
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef...
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth...
A Use represents the edge between a Value definition and its users.
static SDValue findMUL_LOHI(SDValue V)
static bool isVREVMask(ArrayRef< int > M, EVT VT, unsigned BlockSize)
isVREVMask - Check if a vector shuffle corresponds to a VREV instruction with the specified blocksize...
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
static bool isVZIP_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of "vector_shuffle v...
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
Value * emitStoreConditional(IRBuilder<> &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
unsigned getFrameRegister(const MachineFunction &MF) const override
CallLoweringInfo & setChain(SDValue InChain)
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG)
PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for ISD::VECTOR_SHUFFLE.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
separate const offset from Split GEPs to a variadic base and a constant offset for better CSE
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
CopyToReg - This node has three operands: a chain, a register number to set to this value...
bool isIntegerTy() const
True if this is an instance of IntegerType.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
unsigned getScalarValueSizeInBits() const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
FLT_ROUNDS_ - Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest 2 Round to ...
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc, MachineFunction &MF) const override
Returns the target specific optimal type for load and store operations as a result of memset...
This file contains the simple types necessary to represent the attributes associated with functions a...
static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl)
getZeroVector - Returns a vector of specified type with all zero elements.
unsigned getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
The memory access is dereferenceable (i.e., doesn't trap).
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt) For double-word atomic operations: ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amtLo, amtHi) ValLo, ValHi, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amtLo, amtHi) These correspond to the atomicrmw instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
ReplaceNodeResults - Replace the results of node with an illegal result type with new values built ou...
static bool isSingletonVEXTMask(ArrayRef< int > M, EVT VT, unsigned &Imm)
const DataLayout & getDataLayout() const
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
static bool isZeroExtended(SDNode *N, SelectionDAG &DAG)
isZeroExtended - Check if a node is a vector value that is zero-extended or a constant BUILD_VECTOR w...
static ShiftOpc getShiftOpcForNode(unsigned Opcode)
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, int64_t &Cnt)
isVShiftRImm - Check if this is a valid build_vector for the immediate operand of a vector shift righ...
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE R Default(T Value)
uint64_t getNumElements() const
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
SmallVectorImpl< unsigned > & getCallSiteLandingPad(MCSymbol *Sym)
Get the call site indexes for a landing pad EH symbol.
LocInfo getLocInfo() const
static SDValue PerformMULCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
KnownBits zext(unsigned BitWidth)
Zero extends the underlying known Zero and One bits.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
static StructType * get(LLVMContext &Context, ArrayRef< Type *> Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
This file implements a class to represent arbitrary precision integral constant values and operations...
void SplitString(StringRef Source, SmallVectorImpl< StringRef > &OutFragments, StringRef Delimiters=" \\\)
SplitString - Split up the specified string according to the specified delimiters, appending the result fragments to the output list.
static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2)
Return the store opcode for a given store size.
unsigned getArgRegsSaveSize() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPostIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mo...
bool isGVIndirectSymbol(const GlobalValue *GV) const
True if the GV will be accessed via an indirect symbol.
SmallVector< ISD::InputArg, 32 > Ins
AtomicOrdering
Atomic ordering for LLVM's memory model.
static SDValue AddCombineTo64bitMLAL(SDNode *AddeSubeNode, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
STACKSAVE - STACKSAVE has one operand, an input chain.
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx, unsigned &Cost) const override
Return true if the target can combine store(extractelement VectorTy, Idx).
unsigned getSizeInBits() const
MachineInstr * getVRegDef(unsigned Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
static const unsigned PerfectShuffleTable[6561+1]
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG)
unsigned getActiveBits() const
Compute the number of active bits in the value.
int64_t getSExtValue() const
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Fast - This calling convention attempts to make calls as fast as possible (e.g.
unsigned getScalarSizeInBits() const
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Value * emitLoadLinked(IRBuilder<> &Builder, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type...
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
unsigned getNextStackOffset() const
getNextStackOffset - Return the next stack offset such that all stack slots satisfy their alignment r...
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
int64_t getSExtValue() const
Get sign extended value.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself...
Constant * createSequentialMask(IRBuilder<> &Builder, unsigned Start, unsigned NumInts, unsigned NumUndefs)
Create a sequential shuffle mask.
Type * getType() const
All values are typed, get the type of this value.
MachineFunction & getMachineFunction() const
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
static std::array< MachineOperand, 2 > predOps(ARMCC::CondCodes Pred, unsigned PredReg=0)
Get the operands corresponding to the given Pred value.
OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN) The target initializes the dispatch table here...
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG)
bool useMovt(const MachineFunction &MF) const
SDValue getRegisterMask(const uint32_t *RegMask)
static SDValue promoteToConstantPool(const ARMTargetLowering *TLI, const GlobalValue *GV, SelectionDAG &DAG, EVT PtrVT, const SDLoc &dl)
const TargetMachine & getTarget() const
BasicBlock * GetInsertBlock() const
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG)
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
static bool isSRL16(const SDValue &Op)
This contains information for each constraint that we are lowering.
Simple integer binary arithmetic operators.
void addInRegsParamInfo(unsigned RegBegin, unsigned RegEnd)
static bool isStore(int Opcode)
SmallVector< ISD::OutputArg, 32 > Outs
bool useNEONForSinglePrecisionFP() const
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
CallLoweringInfo & setZExtResult(bool Value=true)
bool isTargetDarwin() const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
void setIsSplitCSR(bool s)
An instruction for storing to memory.
static mvt_range integer_vector_valuetypes()
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
op_iterator op_begin() const
unsigned getStackAlignment() const
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="")
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask)
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
static Function * getFunction(Constant *C)
unsigned getBitWidth() const
Get the number of bits in this IntegerType.
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
bool isStrongerThanMonotonic(AtomicOrdering ao)
static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG)
static const MCPhysReg GPRArgRegs[]
Value * CreateZExtOrBitCast(Value *V, Type *DestTy, const Twine &Name="")
amdgpu Simplify well known AMD library false Value * Callee
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
static SDValue PerformAddeSubeCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
Function * getDeclaration(Module *M, ID id, ArrayRef< Type *> Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
MVT getVectorElementType() const
Value * getOperand(unsigned i) const
Analysis containing CSE Info
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
static SDValue FindBFIToCombineWith(SDNode *N)
Class to represent pointers.
unsigned getByValSize() const
UNDEF - An undefined node.
ARM_AAPCS_VFP - Same as ARM_AAPCS, but uses hard floating point ABI.
This class is used to represent ISD::STORE nodes.
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
bool isReadOnly(const GlobalValue *GV) const
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG)
PerformVMOVDRRCombine - Target-specific dag combine xforms for ARMISD::VMOVDRR.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the specified, possibly variable...
unsigned getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
TargetInstrInfo - Interface to description of machine instruction set.
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
int getPromotedConstpoolIncrease() const
bool hasDivideInARMMode() const
AddrOpc getAM2Op(unsigned AM2Opc)
bool isTargetWatchABI() const
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Get a value with high bits set.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits...
static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, const TargetInstrInfo *TII, const DebugLoc &dl, unsigned LdSize, unsigned Data, unsigned AddrIn, unsigned AddrOut, bool IsThumb1, bool IsThumb2)
Emit a post-increment load operation with given size.
static EVT getExtensionTo64Bits(const EVT &OrigVT)
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
bool isOSWindows() const
Tests whether the OS is Windows.
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
The memory access is volatile.
void setReturnRegsCount(unsigned s)
constexpr uint64_t MinAlign(uint64_t A, uint64_t B)
A and B are either alignments or offsets.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space...
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
unsigned getSchedClass() const
Return the scheduling class for this instruction.
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata *> MDs)
const SDValue & getBasePtr() const
A switch()-like statement whose cases are string literals.
static SDValue PerformADDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
initializer< Ty > init(const Ty &Val)
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
Type * getReturnType() const
Returns the type of the ret val.
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
static SDValue PerformBFICombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
void addLiveIn(MCPhysReg PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
bool isAllOnesValue() const
Determine if all bits are set.
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
bool preferISHSTBarriers() const
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
int getVarArgsFrameIndex() const
CodeGenOpt::Level getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
bool useSoftFloat() const override
unsigned const MachineRegisterInfo * MRI
static bool isSignExtended(SDNode *N, SelectionDAG &DAG)
isSignExtended - Check if a node is a vector value that is sign-extended or a constant BUILD_VECTOR w...
std::size_t countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1...
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, bool isSEXTLoad, SDValue &Base, SDValue &Offset, bool &isInc, SelectionDAG &DAG)
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
unsigned countPopulation() const
Count the number of bits set.
const uint32_t * getThisReturnPreservedMask(const MachineFunction &MF, CallingConv::ID) const
getThisReturnPreservedMask - Returns a call preserved mask specific to the case that 'returned' is on...
bool shouldAssumeDSOLocal(const Module &M, const GlobalValue *GV) const
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
An array constant whose element type is a simple 1/2/4/8-byte integer or float/double, and whose elements are just simple data values (i.e.
static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD) can replace combinations of ...
Value * getCalledValue() const
Value * concatenateVectors(IRBuilder<> &Builder, ArrayRef< Value *> Vecs)
Concatenate a list of vectors.
static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, ArrayRef< int > ShuffleMask, SelectionDAG &DAG)
LLVM Basic Block Representation.
static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, TargetLowering::DAGCombinerInfo &DCI, bool AllOnes=false)
bool isOptionalDef() const
Set if this operand is a optional def.
The instances of the Type class are immutable: once they are created, they are never changed...
static SDValue PerformVDUPCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP.
bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, unsigned &PrefAlign) const override
Return true if the pointer arguments to CI should be aligned by aligning the object whose address is ...
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass...
This is an important class for using LLVM in a threaded context.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type...
CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC, bool isVarArg) const
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
Simple binary floating point operators.
void setTargetDAGCombine(ISD::NodeType NT)
Targets should invoke this method for each target independent node that they want to provide a custom...
bool hasAnyUseOfValue(unsigned Value) const
Return true if there are any use of the indicated value.
bool isThumbImmShiftedVal(unsigned V)
isThumbImmShiftedVal - Return true if the specified value can be obtained by left shifting a 8-bit im...
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
size_t size() const
size - Get the array size.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This is an important base class in LLVM.
void resetAll()
Resets the known state of all bits.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE...
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator begin()
const SDValue & getOperand(unsigned Num) const
static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG)
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static bool isSHL16(const SDValue &Op)
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
bool isPointerTy() const
True if this is an instance of PointerType.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
static cl::opt< unsigned > ConstpoolPromotionMaxSize("arm-promote-constant-max-size", cl::Hidden, cl::desc("Maximum size of constant to promote into a constant pool"), cl::init(64))
static ManagedStatic< OptionRegistry > OR
bool isTargetWatchOS() const
ConstantFP - Floating Point Values [float, double].
ARM_APCS - ARM Procedure Calling Standard calling convention (obsolete, but still used on some target...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
static bool isReverseMask(ArrayRef< int > M, EVT VT)
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
bool isAcquireOrStronger(AtomicOrdering ao)
Libcall getFPTOSINT(EVT OpVT, EVT RetVT)
getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
static bool isZeroOrAllOnes(SDValue N, bool AllOnes)
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
void AddToWorklist(SDNode *N)
static bool isLowerSaturate(const SDValue LHS, const SDValue RHS, const SDValue TrueVal, const SDValue FalseVal, const ISD::CondCode CC, const SDValue K)
static mvt_range fp_valuetypes()
unsigned getPrefTypeAlignment(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC)
IntCCToARMCC - Convert a DAG integer condition code to an ARM CC.
static bool BitsProperlyConcatenate(const APInt &A, const APInt &B)
bool isDesirableToCommuteWithShift(const SDNode *N, CombineLevel Level) const override
Return true if it is profitable to move this shift by a constant amount though its operand...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
static Type * getVoidTy(LLVMContext &C)
This class provides iterator support for SDUse operands that use a specific SDNode.
static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl, SelectionDAG &DAG)
static bool isSRA16(const SDValue &Op)
bool isHalfTy() const
Return true if this is 'half', a 16-bit IEEE fp type.
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
bool CombineTo(SDValue O, SDValue N)
int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
static bool isLowerSaturatingConditional(const SDValue &Op, SDValue &V, SDValue &SatK)
static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
void computeMaxCallFrameSize(const MachineFunction &MF)
Computes the maximum size of a callframe and the AdjustsStack property.
TRAP - Trapping instruction.
const APInt & getAPIntValue() const
bool definesRegister(unsigned Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr fully defines the specified register.
const Triple & getTargetTriple() const
unsigned convertAddSubFlagsOpcode(unsigned OldOpc)
Map pseudo instructions that imply an 'S' bit onto real opcodes.
static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, SDValue &RetVal1, SDValue &RetVal2)
Value * getPointerOperand()
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
static bool resultsCompatible(CallingConv::ID CalleeCC, CallingConv::ID CallerCC, MachineFunction &MF, LLVMContext &C, const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn CalleeFn, CCAssignFn CallerFn)
Returns true if the results of the two calling conventions are compatible.
DEBUGTRAP - Trap intended to get the attention of a debugger.
static mvt_range vector_valuetypes()
std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const override
Return the largest legal super-reg register class of the register class for the specified type and it...
int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS) const override
getScalingFactorCost - Return the cost of the scaling used in addressing mode represented by AM...
SDValue PerformCMOVToBFICombine(SDNode *N, SelectionDAG &DAG) const
bool shouldInsertFencesForAtomic(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic...
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
static cl::opt< bool > EnableConstpoolPromotion("arm-promote-constant", cl::Hidden, cl::desc("Enable / disable promotion of unnamed_addr constants into " "constant pools"), cl::init(false))
self_iterator getIterator()
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align=0, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, unsigned Size=0)
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
The memory access is non-temporal.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
Class to represent integer types.
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y)...
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
bool allowsUnalignedMem() const
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
static void ReplaceCMP_SWAP_64Results(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG)
void print(raw_ostream &OS, bool IsStandalone=true, bool SkipOpers=false, bool SkipDebugLoc=false, bool AddNewLine=true, const TargetInstrInfo *TII=nullptr) const
Print this MI to OS.
static SDValue PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Target-specific dag combine xforms for ARMISD::BUILD_VECTOR.
const ARMSubtarget * getSubtarget() const
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
unsigned MaxStoresPerMemmove
Specify maximum bytes of store instructions per memmove call.
bool isOSBinFormatMachO() const
Tests whether the environment is MachO.
const MachineInstrBuilder & addFrameIndex(int Idx) const
unsigned getInRegsParamsProcessed() const
bool isThumb1OnlyFunction() const
Bit counting operators with an undefined result for zero inputs.
static SDValue PerformVDUPLANECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
PerformVDUPLANECombine - Target-specific dag combine xforms for ARMISD::VDUPLANE. ...
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo...
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
bool isCheapToSpeculateCttz() const override
Return true if it is cheap to speculate a call to intrinsic cttz.
SmallPtrSet< const GlobalVariable *, 2 > & getGlobalsPromotedToConstantPool()
succ_iterator succ_begin()
static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
std::vector< ArgListEntry > ArgListTy
static SDValue PerformORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformORCombine - Target-specific dag combine xforms for ISD::OR.
bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
unsigned getAlignment() const
static SDValue AddCombineTo64bitUMAAL(SDNode *AddeNode, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
static SDValue PerformORCombineToSMULWBT(SDNode *OR, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
bool isPositionIndependent() const
This structure contains all information that is necessary for lowering calls.
static bool isLTorLE(ISD::CondCode CC)
SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
auto find(R &&Range, const T &Val) -> decltype(adl_begin(Range))
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly...
PointerType * getInt8PtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer to an 8-bit integer value.
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
static ARMConstantPoolMBB * Create(LLVMContext &C, const MachineBasicBlock *mbb, unsigned ID, unsigned char PCAdj)
const TargetMachine & getTargetMachine() const
This class contains a discriminated union of information about pointers in memory operands...
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode...
static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl, SelectionDAG &DAG)
unsigned getNumOperands() const
Return the number of values used by this operation.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC)
Set the CallingConv that should be used for the specified libcall.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
const MCPhysReg * getCalleeSavedRegsViaCopy(const MachineFunction *MF) const
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="")
const std::string & getConstraintString() const
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands...
Triple - Helper class for working with autoconf configuration names.
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
bool isEXTLoad(const SDNode *N)
Returns true if the specified node is a EXTLOAD.
void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC)
Override the default CondCode to be used to test the result of the comparison libcall against zero...
unsigned getMaxSupportedInterleaveFactor() const override
Get the maximum supported factor for interleaved memory accesses.
The memory access writes data.
static const int BlockSize
bool isReleaseOrStronger(AtomicOrdering ao)
bool use_empty() const
Return true if there are no uses of this node.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type...
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
Libcall getFPEXT(EVT OpVT, EVT RetVT)
getFPEXT - Return the FPEXT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
PerformShiftCombine - Checks for immediate versions of vector shifts and lowers them.
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specific constraint if it is set.
SDValue getTargetConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offset=0, unsigned char TargetFlags=0)
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned char TargetFlags=0)
TokenFactor - This node takes multiple tokens as input and produces a single token result...
void dump() const
Dump this node, for debugging.
const TargetLowering & getTargetLoweringInfo() const
Iterator for intrusive lists based on ilist_node.
void setPromotedConstpoolIncrease(int Sz)
CCState - This class holds information needed while lowering arguments and return values...
int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into a shifter_operand immed...
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG)
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
void setDesc(const MCInstrDesc &tid)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one...
static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
BlockVerifier::State From
static MachineOperand t1CondCodeOp(bool isDead=false)
Get the operand corresponding to the conditional code result for Thumb1.
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
bool useSoftFloat() const
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG)
bool isShuffleMaskLegal(ArrayRef< int > M, EVT VT) const override
isShuffleMaskLegal - Targets can use this to indicate that they only support some VECTOR_SHUFFLE oper...
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isTargetAEABI() const
Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but op #2 is a boolean indicating ...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt)
getVShiftImm - Check if this is a valid build_vector for the immediate operand of a vector shift oper...
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
MachineOperand class - Representation of each machine instruction operand.
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
Module.h This file contains the declarations for the Module class.
static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG, const EVT &OrigTy, const EVT &ExtTy, unsigned ExtOpcode)
AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total value size to 64 bits...
static void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
bool isFPBrccSlow() const
const InstrItineraryData * getInstrItineraryData() const override
getInstrItins - Return the instruction itineraries based on subtarget selection.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op.
Provides information about what library functions are available for the current target.
CCValAssign - Represent assignment of one arg/retval to a location.
bool isTargetAndroid() const
constexpr size_t array_lengthof(T(&)[N])
Find the length of an array.
static void ReplaceREADCYCLECOUNTER(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
unsigned getABITypeAlignment(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
bool hasMPExtension() const
BRCOND - Conditional branch.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned char TargetFlags=0) const
Byte Swap and Counting operators.
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
LLVM_NODISCARD T pop_back_val()
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
This is an abstract virtual class for memory operations.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm)
const Constant * getConstVal() const
static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG)
bool isCalledByLegalizer() const
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
CallLoweringInfo & setSExtResult(bool Value=true)
unsigned getAM2Offset(unsigned AM2Opc)
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
bool isFPImmLegal(const APFloat &Imm, EVT VT) const override
isFPImmLegal - Returns true if the target can instruction select the specified FP immediate natively...
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Represents one node in the SelectionDAG.
CondCode getSetCCInverse(CondCode Operation, bool isInteger)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode, bool &swpCmpOps, bool &swpVselOps)
void setAdjustsStack(bool V)
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
BBTy * getParent() const
Get the basic block containing the call site.
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
const Function & getFunction() const
Return the LLVM function that this machine code represents.
virtual const TargetSubtargetInfo * getSubtargetImpl(const Function &) const
Virtual method implemented by subclasses that returns a reference to that target's TargetSubtargetInf...
unsigned getPrefLoopAlignment() const
unsigned logBase2() const
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static mvt_range integer_valuetypes()
The access may modify the value stored in memory.
static bool isS16(const SDValue &Op, SelectionDAG &DAG)
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
unsigned getVectorNumElements() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
bool isLiveIn(MCPhysReg Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
size_t use_size() const
Return the number of uses of this node.
unsigned getPreferredAlignment(const GlobalVariable *GV) const
Returns the preferred alignment of the specified global.
static unsigned isNEONTwoResultShuffleMask(ArrayRef< int > ShuffleMask, EVT VT, unsigned &WhichResult, bool &isV_UNDEF)
Check if ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN), and return the corresponding AR...
bool ExpandInlineAsm(CallInst *CI) const override
This hook allows the target to expand an inline asm call to be explicit llvm code if it wants to...
Class to represent vector types.
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT...
void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
EVT getMemoryVT() const
Return the type of the in-memory value.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
Class for arbitrary precision integers.
unsigned getByValAlign() const
CodeModel::Model getCodeModel() const
Returns the code model.
Instruction * makeDMB(IRBuilder<> &Builder, ARM_MB::MemBOpt Domain) const
iterator_range< use_iterator > uses()
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
A "pseudo-class" with methods for operating on BUILD_VECTORs.
Select(COND, TRUEVAL, FALSEVAL).
void setMinFunctionAlignment(unsigned Align)
Set the target's minimum function alignment (in log2(bytes))
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool readsRegister(unsigned Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr reads the specified register.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static use_iterator use_end()
typename SuperClass::iterator iterator
iterator_range< user_iterator > users()
void setPrefLoopAlignment(unsigned Align)
Set the target's preferred loop alignment.
ZERO_EXTEND - Used for integer types, zeroing the new bits.
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
static SDValue PerformAddcSubcCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
ANY_EXTEND - Used for integer types. The high bits are undefined.
bool genLongCalls() const
iterator insert(iterator I, T &&Elt)
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
int getMaskElt(unsigned Idx) const
static bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, MachineFrameInfo &MFI, const MachineRegisterInfo *MRI, const TargetInstrInfo *TII)
MatchingStackOffset - Return true if the given stack call argument is already available in the same p...
void setArgumentStackSize(unsigned size)
static SDValue PerformXORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
bool hasVMLxForwarding() const
static bool isVTBLMask(ArrayRef< int > M, EVT VT)
bool isEmpty() const
Returns true if there are no itineraries.
bool isCheapToSpeculateCtlz() const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
amdgpu Simplify well known AMD library false Value Value * Arg
const MachineBasicBlock * getParent() const
static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD) can replace combinations of ...
bool isUnknown() const
Returns true if we don't know any bits.
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
uint64_t getTypeSizeInBits(Type *Ty) const
Size examples:
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
CallLoweringInfo & setTailCall(bool Value=true)
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
const TargetRegisterClass * getRegClassFor(MVT VT) const override
getRegClassFor - Return the register class that should be used for the specified value type...
Value * CreateTruncOrBitCast(Value *V, Type *DestTy, const Twine &Name="")
Section Relative (Windows TLS)
uint64_t getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
BR_JT - Jumptable branch.
static SDValue ConvertBooleanCarryToCarryFlag(SDValue BoolCarry, SelectionDAG &DAG)
Representation of each machine instruction.
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
bool killsRegister(unsigned Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr kills the specified register.
const Triple & getTargetTriple() const
Libcall getFPTOUINT(EVT OpVT, EVT RetVT)
getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
static CondCodes getOppositeCondition(CondCodes CC)
bool shouldFoldShiftPairToMask(const SDNode *N, CombineLevel Level) const override
Return true if it is profitable to fold a pair of shifts into a mask.
static bool canChangeToInt(SDValue Op, bool &SeenZero, const ARMSubtarget *Subtarget)
canChangeToInt - Given the fp compare operand, return true if it is suitable to morph to an integer c...
bool hasRetAddrStack() const
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator end()
void setVarArgsFrameIndex(int Index)
SmallVector< SDValue, 32 > OutVals
bool is64BitVector() const
Return true if this is a 64-bit vector type.
static SDValue PerformVMOVRRDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformVMOVRRDCombine - Target-specific dag combine xforms for ARMISD::VMOVRRD.
static bool LowerToByteSwap(CallInst *CI)
Try to replace a call instruction with a call to a bswap intrinsic.
unsigned getNumArgOperands() const
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
bool isVector() const
Return true if this is a vector value type.
const uint32_t * getSjLjDispatchPreservedMask(const MachineFunction &MF) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const unsigned char * bytes_begin() const
Bitwise operators - logical and, logical or, logical xor.
bool isOnlyUserOf(const SDNode *N) const
Return true if this node is the only use of N.
int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, uint32_t BitWidth) const
If this is a constant FP splat and the splatted constant FP is an exact power or 2, return the log base 2 integer value.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
unsigned getAlignment() const
Return the alignment of the access that is being performed.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, bool isSigned, const SDLoc &dl, bool doesNotReturn=false, bool isReturnValueUsed=true) const
Returns a pair of (return value, chain).
static MachineOperand condCodeOp(unsigned CCReg=0)
Get the operand corresponding to the conditional code result.
bool isStrongDefinitionForLinker() const
Returns true if this global's definition will be the one chosen by the linker.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
static IntegerType * getInt32Ty(LLVMContext &C)
unsigned getLocMemOffset() const
ObjectFormatType getObjectFormat() const
getFormat - Get the object format for this triple.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
static bool isVUZP_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of "vector_shuffle v...
bool isEHPad() const
Returns true if the block is a landing pad.
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
LLVM_NODISCARD bool empty() const
bool is128BitVector() const
Return true if this is a 128-bit vector type.
StringRef getValueAsString() const
Return the attribute's value as a string.
virtual unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct load from a stack slot, return the virtual or physic...
static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget, MachineInstr &MI, const SDNode *Node)
Attaches vregs to MEMCPY that it will use as scratch registers when it is expanded into LDM/STM...
static SDValue LowerInterruptReturn(SmallVectorImpl< SDValue > &RetOps, const SDLoc &DL, SelectionDAG &DAG)
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value...
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
bool isVectorLoadExtDesirable(SDValue ExtVal) const override
Return true if folding a vector load into ExtVal (a sign, zero, or any extend node) is profitable...
ARMFunctionInfo - This class is derived from MachineFunctionInfo and contains private ARM-specific in...
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
static bool isLegalT2AddressImmediate(int64_t V, EVT VT, const ARMSubtarget *Subtarget)
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
PointerUnion< const Value *, const PseudoSourceValue * > ptrVal
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation.
const Function * getParent() const
Return the enclosing method, or null if none.
static bool isLegalAddressImmediate(int64_t V, EVT VT, const ARMSubtarget *Subtarget)
isLegalAddressImmediate - Return true if the integer value can be used as the offset of the target ad...
void setReg(unsigned Reg)
Change the register this operand corresponds to.
void setArgRegsSaveSize(unsigned s)
static MachineOperand CreateImm(int64_t Val)
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
const TargetSubtargetInfo & getSubtarget() const
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, const TargetInstrInfo *TII, const DebugLoc &dl, unsigned StSize, unsigned Data, unsigned AddrIn, unsigned AddrOut, bool IsThumb1, bool IsThumb2)
Emit a post-increment store operation with given size.
Flags getFlags() const
Return the raw flags of the source value,.
APFloat abs(APFloat X)
Returns the absolute value of the argument.
const ARMBaseRegisterInfo * getRegisterInfo() const override
bool optForMinSize() const
Optimize this function for minimum size (-Oz).
The memory access always returns the same value (or traps).
unsigned MaxStoresPerMemmoveOptSize
Maximum number of store instructions that may be substituted for a call to memmove, used for functions with OptSize attribute.
unsigned MaxStoresPerMemcpyOptSize
Maximum number of store operations that may be substituted for a call to memcpy, used for functions w...
static cl::opt< unsigned > ConstpoolPromotionMaxTotal("arm-promote-constant-max-total", cl::Hidden, cl::desc("Maximum size of ALL constants to promote into a constant pool"), cl::init(128))
void setStackPointerRegisterToSaveRestore(unsigned R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
unsigned InferPtrAlignment(SDValue Ptr) const
Infer alignment of a load / store address.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Rename collisions when linking (static functions).
static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
bool isLegalAddImmediate(int64_t Imm) const override
isLegalAddImmediate - Return true if the specified immediate is legal add immediate, that is the target has add instructions which can add a register and the immediate without having to materialize the immediate into a register.
static const int LAST_INDEXED_MODE
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value *> Args=None, const Twine &Name="", MDNode *FPMathTag=nullptr)
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, bool isSEXTLoad, SDValue &Base, SDValue &Offset, bool &isInc, SelectionDAG &DAG)
unsigned getOpcode() const
FSINCOS - Compute both fsin and fcos as a single operation.
SDValue getValue(unsigned R) const
unsigned getInRegsParamsCount() const
unsigned MaxStoresPerMemcpy
Specify maximum bytes of store instructions per memcpy call.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
constexpr bool isUInt< 16 >(uint64_t x)
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.setjmp intrinsic.
SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const
PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND.
unsigned getAlignment() const
Return the alignment of the access that is being performed.
static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
void removeSuccessor(MachineBasicBlock *Succ, bool NormalizeSuccProbs=false)
Remove successor from the successors list of this MachineBasicBlock.
LLVM_NODISCARD bool empty() const
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
bool hasOptionalDef(QueryType Type=IgnoreBundle) const
Set if this instruction has an optional definition, e.g.
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded, TargetLoweringOpt &TLO) const override
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getCondCode(ISD::CondCode Cond)
Libcall getSINTTOFP(EVT OpVT, EVT RetVT)
getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
static bool isGTorGE(ISD::CondCode CC)
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
bool isTargetMachO() const
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned char TargetFlags=0) const
const MachinePointerInfo & getPointerInfo() const
static SDValue PerformVLDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
MachineConstantPoolValue * getMachineCPVal() const
static SDValue PerformORCombineToBFI(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static RTLIB::Libcall getDivRemLibcall(const SDNode *N, MVT::SimpleValueType SVT)
bool isLegalICmpImmediate(int64_t Imm) const override
isLegalICmpImmediate - Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructions which can compare a register against the immediate without having to materialize the immediate into a register.
bool hasAtomicStore() const
Return true if this atomic instruction stores to memory.
static bool isVTRN_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of "vector_shuffle v...
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes, SDValue &CC, bool &Invert, SDValue &OtherOp, SelectionDAG &DAG)
void insert(iterator MBBI, MachineBasicBlock *MBB)
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
void setReturnAddressIsTaken(bool s)
bool isPredecessorOf(const SDNode *N) const
Return true if this node is a predecessor of N.
static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
void getInRegsParamInfo(unsigned InRegsParamRecordIndex, unsigned &BeginReg, unsigned &EndReg) const
void setMinStackArgumentAlignment(unsigned Align)
Set the minimum stack alignment of an argument (in log2(bytes)).
int getFP32Imm(const APInt &Imm)
getFP32Imm - Return an 8-bit floating-point version of the 32-bit floating-point value.
static bool checkAndUpdateCPSRKill(MachineBasicBlock::iterator SelectItr, MachineBasicBlock *BB, const TargetRegisterInfo *TRI)
Synchronized with respect to signal handlers executing in the same thread.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
CallLoweringInfo & setInRegister(bool Value=true)
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
unsigned getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
ArrayRef< int > getMask() const
Module * getParent()
Get the module that this global value is contained inside of...
LLVM Value Representation.
FMA - Perform a * b + c with no intermediate rounding step.
SDValue getRegister(unsigned Reg, EVT VT)
bool supportsTailCall() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
virtual void finalizeLowering(MachineFunction &MF) const
Execute target specific actions to finalize target lowering.
bool useLoadStackGuardNode() const override
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
static VectorType * get(Type *ElementType, unsigned NumElements)
This static method is the primary way to construct an VectorType.
static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2)
Return the load opcode for a given load size.
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
void push_back(MachineBasicBlock *MBB)
static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG)
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond)
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
ARMTargetLowering(const TargetMachine &TM, const ARMSubtarget &STI)
SDValue getValueType(EVT)
KnownBits sext(unsigned BitWidth)
Sign extends the underlying known Zero and One bits.
const MCOperandInfo * OpInfo
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
int getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
PREFETCH - This corresponds to a prefetch intrinsic.
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override
Returns true if an argument of type Ty needs to be passed in a contiguous block of registers in calli...
MO_NONLAZY - This is an independent flag, on a symbol operand "FOO" it represents a symbol which...
bool isUndef() const
Return true if the type of the node type undefined.
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target...
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
static SDValue PerformSHLSimplify(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *ST)
unsigned getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG)
void rewindByValRegsInfo()
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone...
bool isTargetWindows() const
Primary interface to the complete machine description for the target machine.
unsigned createJumpTableIndex(const std::vector< MachineBasicBlock *> &DestBBs)
createJumpTableIndex - Create a new jump table.
Type * getElementType() const
static SDValue PerformSTORECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
PerformSTORECombine - Target-specific dag combine xforms for ISD::STORE.
const APFloat & getValueAPF() const
static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG)
static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
constexpr bool isShiftedMask_32(uint32_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (32 bit ver...
StringRef - Represent a constant reference to a string, i.e.
SetCC operator - This evaluates to a true value iff the condition is true.
void RemoveOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with...
bool isOSVersionLT(unsigned Major, unsigned Minor=0, unsigned Micro=0) const
isOSVersionLT - Helper function for doing comparisons against version numbers included in the target ...
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
unsigned MaxStoresPerMemsetOptSize
Maximum number of stores operations that may be substituted for the call to memset, used for functions with OptSize attribute.
Type * getArrayElementType() const
static MachineBasicBlock * OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ)
unsigned countLeadingZeros() const
The APInt version of the countLeadingZeros functions in MathExtras.h.
static BranchProbability getZero()
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
static SDValue LowerFPOWI(SDValue Op, const ARMSubtarget &Subtarget, SelectionDAG &DAG)
bool isFNegFree(EVT VT) const override
Return true if an fneg operation is free to the point where it is never worthwhile to replace it with...
bool operator==(uint64_t V1, const APInt &V2)
unsigned getNumOperands() const
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
bool hasAnyDataBarrier() const
static bool isVolatile(Instruction *Inst)
bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for this result type with this index.
const SDValue & getOperand(unsigned i) const
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
unsigned getLocReg() const
uint64_t getZExtValue() const
TRUNCATE - Completely drop the high bits.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
bool isBitFieldInvertedMask(unsigned v)
static SDValue CombineANDShift(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
unsigned AllocateReg(unsigned Reg)
AllocateReg - Attempt to allocate one register.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass...
bool hasDataBarrier() const
const MachineOperand & getOperand(unsigned i) const
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand *> NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
Instruction * emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
OutputIt copy(R &&Range, OutputIt Out)
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const SDLoc &dl)
GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit the specified operations t...
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
ExpandBITCAST - If the target supports VFP, this function is called to expand a bit convert where eit...
bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns true if the given (atomic) store should be expanded by the IR-level AtomicExpand pass into an...
static SDValue AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
Perform various unary floating-point operations inspired by libm.
VectorType * getType() const
Overload to return most specific vector type.
static bool isSaturatingConditional(const SDValue &Op, SDValue &V, uint64_t &K, bool &usat)
Value * getPointerOperand()
static IntegerType * getInt8Ty(LLVMContext &C)
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
LLVMContext * getContext() const
static ARMConstantPoolConstant * Create(const Constant *C, unsigned ID)
bool isTruncateFree(Type *SrcTy, Type *DstTy) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
static bool isSplatMask(const int *Mask, EVT VT)
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
unsigned getConstantPoolIndex(const Constant *C, unsigned Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one...
static bool isVEXTMask(ArrayRef< int > M, EVT VT, bool &ReverseVEXT, unsigned &Imm)
static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign, unsigned AlignCheck)
Type * getElementType() const
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
bool PredictableSelectIsExpensive
Tells the code generator that select is more expensive than a branch if the branch is usually predict...
std::vector< MachineBasicBlock * >::iterator succ_iterator
static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG)
unsigned createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
Carry-using nodes for multiple precision addition and subtraction.
static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG)
bool empty() const
empty - Check if the array is empty.
bool isArrayTy() const
True if this is an instance of ArrayType.
unsigned getSize() const
Return the number of bytes in the encoding of this instruction, or zero if the encoding size cannot b...
This file describes how to lower LLVM code to machine code.
const BasicBlock * getParent() const
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned char TargetFlags=0)
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace, unsigned Align, bool *Fast) const override
allowsMisalignedMemoryAccesses - Returns true if the target allows unaligned memory accesses of the s...
void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
FloatABI::ABIType FloatABIType
FloatABIType - This setting is set by -float-abi=xxx option is specfied on the command line...
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
static TargetLowering::ArgListTy getDivRemArgList(const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget)
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, unsigned SplatBitSize, SelectionDAG &DAG, const SDLoc &dl, EVT &VT, bool is128Bits, NEONModImmType type)
isNEONModifiedImm - Check if the specified splat value corresponds to a valid vector constant for a N...
void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
This class is used to represent ISD::LOAD nodes.
static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V)
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary...