124 using namespace llvm;
127 #define DEBUG_TYPE "isel" 135 cl::desc(
"Generate low-precision inline sequences " 136 "for some float libcalls"),
142 cl::desc(
"Set the case probability threshold for peeling the case from a " 143 "switch statement. A value greater than 100 will void this " 166 if (
auto *R = dyn_cast<ReturnInst>(V))
167 return R->getParent()->getParent()->getCallingConv();
169 if (
auto *CI = dyn_cast<CallInst>(V)) {
170 const bool IsInlineAsm = CI->isInlineAsm();
171 const bool IsIndirectFunctionCall =
172 !IsInlineAsm && !CI->getCalledFunction();
177 const bool IsInstrinsicCall =
178 !IsInlineAsm && !IsIndirectFunctionCall &&
181 if (!IsInlineAsm && !IsInstrinsicCall)
182 return CI->getCallingConv();
189 const SDValue *Parts,
unsigned NumParts,
199 const SDValue *Parts,
unsigned NumParts,
207 assert(NumParts > 0 &&
"No parts to assemble!");
218 unsigned RoundParts = NumParts & (NumParts - 1) ?
219 1 <<
Log2_32(NumParts) : NumParts;
220 unsigned RoundBits = PartBits * RoundParts;
221 EVT RoundVT = RoundBits == ValueBits ?
227 if (RoundParts > 2) {
231 RoundParts / 2, PartVT, HalfVT, V);
242 if (RoundParts < NumParts) {
244 unsigned OddParts = NumParts - RoundParts;
275 !PartVT.
isVector() &&
"Unexpected split");
286 if (PartEVT == ValueVT)
290 ValueVT.
bitsLT(PartEVT)) {
303 if (ValueVT.
bitsLT(PartEVT)) {
307 if (AssertOp.hasValue())
308 Val = DAG.
getNode(*AssertOp, DL, PartEVT, Val,
329 const Twine &ErrMsg) {
330 const Instruction *
I = dyn_cast_or_null<Instruction>(V);
334 const char *AsmError =
", possible invalid constraint for vector type";
335 if (
const CallInst *CI = dyn_cast<CallInst>(I))
336 if (isa<InlineAsm>(CI->getCalledValue()))
337 return Ctx.
emitError(I, ErrMsg + AsmError);
348 const SDValue *Parts,
unsigned NumParts,
352 assert(NumParts > 0 &&
"No parts to assemble!");
353 const bool IsABIRegCopy = CallConv.
hasValue();
362 unsigned NumIntermediates;
368 NumIntermediates, RegisterVT);
372 NumIntermediates, RegisterVT);
375 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
377 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
380 "Part type sizes don't match!");
384 if (NumIntermediates == NumParts) {
387 for (
unsigned i = 0; i != NumParts; ++i)
389 PartVT, IntermediateVT, V);
390 }
else if (NumParts > 0) {
393 assert(NumParts % NumIntermediates == 0 &&
394 "Must expand into a divisible number of parts!");
395 unsigned Factor = NumParts / NumIntermediates;
396 for (
unsigned i = 0; i != NumIntermediates; ++i)
398 PartVT, IntermediateVT, V);
407 : NumIntermediates));
410 DL, BuiltVectorTy, Ops);
416 if (PartEVT == ValueVT)
426 "Cannot narrow, it would be a lossy transformation");
437 "Cannot handle this kind of promotion");
467 *DAG.
getContext(), V,
"non-trivial scalar-to-vector conversion");
489 SDValue *Parts,
unsigned NumParts,
MVT PartVT,
501 unsigned OrigNumParts = NumParts;
503 "Copying to an illegal type!");
509 EVT PartEVT = PartVT;
510 if (PartEVT == ValueVT) {
511 assert(NumParts == 1 &&
"No-op copy with multiple parts!");
519 assert(NumParts == 1 &&
"Do not know what to promote to!");
530 "Unknown mismatch!");
532 Val = DAG.
getNode(ExtendKind, DL, ValueVT, Val);
538 assert(NumParts == 1 && PartEVT != ValueVT);
544 "Unknown mismatch!");
554 "Failed to tile the value with PartVT!");
557 if (PartEVT != ValueVT) {
559 "scalar-to-vector conversion failed");
568 if (NumParts & (NumParts - 1)) {
571 "Do not know what to expand to!");
572 unsigned RoundParts = 1 <<
Log2_32(NumParts);
573 unsigned RoundBits = RoundParts * PartBits;
574 unsigned OddParts = NumParts - RoundParts;
577 getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
584 NumParts = RoundParts;
596 for (
unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
597 for (
unsigned i = 0; i < NumParts; i += StepSize) {
598 unsigned ThisBits = StepSize * PartBits / 2;
601 SDValue &Part1 = Parts[i+StepSize/2];
608 if (ThisBits == PartBits && ThisVT != PartVT) {
627 if (PartNumElts > ValueNumElts &&
635 for (
unsigned i = ValueNumElts, e = PartNumElts; i != e; ++i)
654 const bool IsABIRegCopy = CallConv.
hasValue();
657 EVT PartEVT = PartVT;
658 if (PartEVT == ValueVT) {
679 "lossy conversion of vector to scalar type");
680 EVT IntermediateType =
695 unsigned NumIntermediates;
698 NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
700 NumIntermediates, RegisterVT);
703 TLI.getVectorTypeBreakdown(*DAG.
getContext(), ValueVT, IntermediateVT,
704 NumIntermediates, RegisterVT);
707 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
709 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
711 unsigned IntermediateNumElts = IntermediateVT.
isVector() ?
715 unsigned DestVectorNoElts = NumIntermediates * IntermediateNumElts;
720 if (ValueVT != BuiltVectorTy) {
729 for (
unsigned i = 0; i != NumIntermediates; ++i) {
732 DAG.
getConstant(i * IntermediateNumElts, DL, IdxVT));
741 if (NumParts == NumIntermediates) {
744 for (
unsigned i = 0; i != NumParts; ++i)
745 getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
746 }
else if (NumParts > 0) {
749 assert(NumIntermediates != 0 &&
"division by zero");
750 assert(NumParts % NumIntermediates == 0 &&
751 "Must expand into a divisible number of parts!");
752 unsigned Factor = NumParts / NumIntermediates;
753 for (
unsigned i = 0; i != NumIntermediates; ++i)
754 getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
761 : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
762 RegCount(1, regs.
size()), CallConv(CC) {}
780 for (
unsigned i = 0; i != NumRegs; ++i)
782 RegVTs.push_back(RegisterVT);
811 for (
unsigned i = 0; i != NumRegs; ++i) {
838 if (NumZeroBits == RegSize) {
853 }
else if (NumSignBits > 1) {
897 NumParts, RegisterVT, V, CallConv, ExtendKind);
903 for (
unsigned i = 0; i != NumRegs; ++i) {
915 if (NumRegs == 1 || Flag)
926 Chain = Chains[NumRegs-1];
932 unsigned MatchingIdx,
const SDLoc &dl,
934 std::vector<SDValue> &Ops)
const {
960 "No 1:1 mapping from clobbers to regs?");
968 "If we clobbered the stack pointer, MFI should know about it.");
976 for (
unsigned i = 0; i != NumRegs; ++i) {
989 unsigned RegCount = std::get<0>(CountAndVT);
990 MVT RegisterVT = std::get<1>(CountAndVT);
992 for (
unsigned E = I + RegCount; I !=
E; ++
I)
1005 LPadToCallSiteMap.clear();
1010 UnusedArgNodeMap.clear();
1011 PendingLoads.clear();
1012 PendingExports.clear();
1014 HasTailCall =
false;
1015 SDNodeOrder = LowestSDNodeOrder;
1016 StatepointLowering.clear();
1020 DanglingDebugInfoMap.clear();
1024 if (PendingLoads.empty())
1027 if (PendingLoads.size() == 1) {
1028 SDValue Root = PendingLoads[0];
1030 PendingLoads.clear();
1038 size_t Limit = (1 << 16) - 1;
1039 while (PendingLoads.size() > Limit) {
1040 unsigned SliceIdx = PendingLoads.size() - Limit;
1044 PendingLoads.erase(PendingLoads.begin() + SliceIdx, PendingLoads.end());
1045 PendingLoads.emplace_back(NewTF);
1048 PendingLoads.clear();
1056 if (PendingExports.empty())
1061 unsigned i = 0, e = PendingExports.size();
1062 for (; i != e; ++i) {
1063 assert(PendingExports[i].getNode()->getNumOperands() > 1);
1064 if (PendingExports[i].getNode()->getOperand(0) == Root)
1069 PendingExports.push_back(Root);
1074 PendingExports.clear();
1082 HandlePHINodesInSuccessorBlocks(I.
getParent());
1086 if (!isa<DbgInfoIntrinsic>(I))
1093 if (
auto *FPMO = dyn_cast<FPMathOperator>(&I)) {
1099 if (
SDNode *Node = getNodeForIRValue(&I)) {
1102 if (!Node->getFlags().isDefined())
1103 Node->setFlags(IncomingFlags);
1105 Node->intersectFlagsWith(IncomingFlags);
1111 CopyToExportRegsIfNeeded(&I);
1116 void SelectionDAGBuilder::visitPHI(
const PHINode &) {
1126 #define HANDLE_INST(NUM, OPCODE, CLASS) \ 1127 case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break; 1128 #include "llvm/IR/Instruction.def" 1134 auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1138 if (DanglingVariable == Variable && Expr->
fragmentsOverlap(DanglingExpr)) {
1139 LLVM_DEBUG(
dbgs() <<
"Dropping dangling debug info for " << *DI <<
"\n");
1145 for (
auto &DDIMI : DanglingDebugInfoMap) {
1146 DanglingDebugInfoVector &DDIV = DDIMI.second;
1147 DDIV.erase(
remove_if(DDIV, isMatchingDbgValue), DDIV.end());
1155 auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1156 if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1159 DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1160 for (
auto &DDI : DDIV) {
1162 assert(DI &&
"Ill-formed DanglingDebugInfo");
1165 unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1169 "Expected inlined-at fields to agree");
1172 if (!EmitFuncArgumentDbgValue(V, Variable, Expr, dl,
false, Val)) {
1174 << DbgSDNodeOrder <<
"] for:\n " << *DI <<
"\n");
1181 <<
"changing SDNodeOrder from " << DbgSDNodeOrder <<
" to " 1182 << ValSDNodeOrder <<
"\n");
1183 SDV = getDbgValue(Val, Variable, Expr, dl,
1184 std::max(DbgSDNodeOrder, ValSDNodeOrder));
1188 <<
"in EmitFuncArgumentDbgValue\n");
1201 if (It != FuncInfo.ValueMap.
end()) {
1202 unsigned InReg = It->second;
1208 Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain,
nullptr,
1210 resolveDanglingDebugInfo(V, Result);
1230 SDValue Val = getValueImpl(V);
1232 resolveDanglingDebugInfo(V, Val);
1238 return (NodeMap.find(V) != NodeMap.end()) ||
1239 (FuncInfo.ValueMap.find(V) != FuncInfo.ValueMap.end());
1248 if (isa<ConstantSDNode>(N) || isa<ConstantFPSDNode>(
N)) {
1259 SDValue Val = getValueImpl(V);
1261 resolveDanglingDebugInfo(V, Val);
1270 if (
const Constant *
C = dyn_cast<Constant>(V)) {
1279 if (isa<ConstantPointerNull>(
C)) {
1285 if (
const ConstantFP *CFP = dyn_cast<ConstantFP>(
C))
1292 visit(CE->getOpcode(), *CE);
1294 assert(N1.getNode() &&
"visit didn't populate the NodeMap!");
1298 if (isa<ConstantStruct>(
C) || isa<ConstantArray>(
C)) {
1307 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1315 dyn_cast<ConstantDataSequential>(
C)) {
1317 for (
unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1318 SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
1321 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1325 if (isa<ArrayType>(CDS->getType()))
1331 assert((isa<ConstantAggregateZero>(
C) || isa<UndefValue>(
C)) &&
1332 "Unknown struct or array constant!");
1336 unsigned NumElts = ValueVTs.
size();
1340 for (
unsigned i = 0; i != NumElts; ++i) {
1341 EVT EltVT = ValueVTs[i];
1342 if (isa<UndefValue>(
C))
1343 Constants[i] = DAG.
getUNDEF(EltVT);
1347 Constants[i] = DAG.
getConstant(0, getCurSDLoc(), EltVT);
1363 for (
unsigned i = 0; i != NumElements; ++i)
1364 Ops.
push_back(getValue(CV->getOperand(i)));
1366 assert(isa<ConstantAggregateZero>(
C) &&
"Unknown vector constant!");
1375 Ops.
assign(NumElements, Op);
1384 if (
const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1386 FuncInfo.StaticAllocaMap.find(AI);
1387 if (SI != FuncInfo.StaticAllocaMap.end())
1393 if (
const Instruction *Inst = dyn_cast<Instruction>(V)) {
1394 unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
1399 return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain,
nullptr, V);
1405 void SelectionDAGBuilder::visitCatchPad(
const CatchPadInst &
I) {
1415 if (IsMSVCCXX || IsCoreCLR)
1433 if (TargetMBB != NextBlock(FuncInfo.MBB) ||
1446 if (isa<ConstantTokenNone>(ParentPad))
1447 SuccessorColor = &FuncInfo.Fn->getEntryBlock();
1449 SuccessorColor = cast<Instruction>(ParentPad)->
getParent();
1450 assert(SuccessorColor &&
"No parent funclet for catchret!");
1452 assert(SuccessorColorMBB &&
"No MBB for SuccessorColor!");
1461 void SelectionDAGBuilder::visitCleanupPad(
const CleanupPadInst &CPI) {
1464 FuncInfo.MBB->setIsEHScopeEntry();
1467 FuncInfo.MBB->setIsEHFuncletEntry();
1468 FuncInfo.MBB->setIsCleanupFuncletEntry();
1495 if (isa<LandingPadInst>(Pad)) {
1497 UnwindDests.emplace_back(FuncInfo.
MBBMap[EHPadBB], Prob);
1499 }
else if (isa<CleanupPadInst>(Pad)) {
1502 UnwindDests.emplace_back(FuncInfo.
MBBMap[EHPadBB], Prob);
1503 UnwindDests.
back().first->setIsEHScopeEntry();
1505 UnwindDests.back().first->setIsEHFuncletEntry();
1507 }
else if (
auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
1509 for (
const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
1510 UnwindDests.emplace_back(FuncInfo.
MBBMap[CatchPadBB], Prob);
1512 if (IsMSVCCXX || IsCoreCLR)
1513 UnwindDests.back().first->setIsEHFuncletEntry();
1515 UnwindDests.back().first->setIsEHScopeEntry();
1517 NewEHPadBB = CatchSwitch->getUnwindDest();
1523 if (BPI && NewEHPadBB)
1525 EHPadBB = NewEHPadBB;
1539 for (
auto &UnwindDest : UnwindDests) {
1540 UnwindDest.first->setIsEHPad();
1541 addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
1543 FuncInfo.MBB->normalizeSuccProbs();
1551 void SelectionDAGBuilder::visitCatchSwitch(
const CatchSwitchInst &CSI) {
1555 void SelectionDAGBuilder::visitRet(
const ReturnInst &I) {
1558 SDValue Chain = getControlRoot();
1570 LowerDeoptimizingReturn();
1574 if (!FuncInfo.CanLowerReturn) {
1575 unsigned DemoteReg = FuncInfo.DemoteRegister;
1588 DemoteReg, PtrValueVTs[0]);
1594 unsigned NumValues = ValueVTs.size();
1597 for (
unsigned i = 0; i != NumValues; ++i) {
1612 unsigned NumValues = ValueVTs.
size();
1627 bool RetInReg = F->getAttributes().hasAttribute(
1630 for (
unsigned j = 0; j != NumValues; ++j) {
1631 EVT VT = ValueVTs[j];
1643 &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
1656 for (
unsigned i = 0; i < NumParts; ++i) {
1671 assert(FuncInfo.SwiftErrorArg &&
"Need a swift error argument");
1680 DAG.
getRegister(FuncInfo.getOrCreateSwiftErrorVRegUseAt(
1681 &I, FuncInfo.MBB, FuncInfo.SwiftErrorArg).first,
1689 Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
1693 "LowerReturn didn't return a valid chain!");
1708 if (VMI != FuncInfo.ValueMap.
end()) {
1709 assert(!V->
use_empty() &&
"Unused value assigned virtual registers!");
1710 CopyValueToVirtualRegister(V, VMI->second);
1719 if (!isa<Instruction>(V) && !isa<Argument>(V))
return;
1722 if (FuncInfo.isExportedInst(V))
return;
1724 unsigned Reg = FuncInfo.InitializeRegForValue(V);
1725 CopyValueToVirtualRegister(V, Reg);
1734 if (
VI->getParent() == FromBB)
1738 return FuncInfo.isExportedInst(V);
1743 if (isa<Argument>(V)) {
1748 return FuncInfo.isExportedInst(V);
1765 auto SuccSize = std::max<uint32_t>(
succ_size(SrcBB), 1);
1778 Prob = getEdgeProbability(Src, Dst);
1784 if (
const Instruction *I = dyn_cast<Instruction>(V))
1805 if (
const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
1809 if (CurBB == SwitchBB ||
1810 (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
1811 isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
1813 if (
const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
1815 InvertCond ? IC->getInversePredicate() : IC->getPredicate();
1822 if (
TM.Options.NoNaNsFPMath)
1826 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1),
nullptr,
1827 TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
1828 SwitchCases.push_back(CB);
1836 nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
1837 SwitchCases.push_back(CB);
1854 FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
1869 if (BOpc == Instruction::And)
1870 BOpc = Instruction::Or;
1871 else if (BOpc == Instruction::Or)
1872 BOpc = Instruction::And;
1877 if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
1878 BOpc !=
unsigned(Opc) || !BOp->
hasOneUse() ||
1882 EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
1883 TProb, FProb, InvertCond);
1893 if (Opc == Instruction::Or) {
1914 auto NewTrueProb = TProb / 2;
1915 auto NewFalseProb = TProb / 2 + FProb;
1917 FindMergedConditions(BOp->
getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc,
1918 NewTrueProb, NewFalseProb, InvertCond);
1924 FindMergedConditions(BOp->
getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
1925 Probs[0], Probs[1], InvertCond);
1927 assert(Opc == Instruction::And &&
"Unknown merge op!");
1947 auto NewTrueProb = TProb + FProb / 2;
1948 auto NewFalseProb = FProb / 2;
1950 FindMergedConditions(BOp->
getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc,
1951 NewTrueProb, NewFalseProb, InvertCond);
1957 FindMergedConditions(BOp->
getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
1958 Probs[0], Probs[1], InvertCond);
1967 if (Cases.size() != 2)
return true;
1971 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
1972 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
1973 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
1974 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
1980 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
1981 Cases[0].CC == Cases[1].CC &&
1982 isa<Constant>(Cases[0].CmpRHS) &&
1983 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
1984 if (Cases[0].CC ==
ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
1986 if (Cases[0].CC ==
ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
1993 void SelectionDAGBuilder::visitBr(
const BranchInst &I) {
2033 if (
const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
2037 (Opcode == Instruction::And || Opcode == Instruction::Or)) {
2038 FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
2040 getEdgeProbability(BrMBB, Succ0MBB),
2041 getEdgeProbability(BrMBB, Succ1MBB),
2046 assert(SwitchCases[0].ThisBB == BrMBB &&
"Unexpected lowering!");
2049 if (ShouldEmitAsBranches(SwitchCases)) {
2050 for (
unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
2051 ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
2052 ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
2056 visitSwitchCase(SwitchCases[0], BrMBB);
2057 SwitchCases.erase(SwitchCases.begin());
2063 for (
unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
2064 FuncInfo.MF->erase(SwitchCases[i].ThisBB);
2066 SwitchCases.clear();
2072 nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc());
2076 visitSwitchCase(CB, BrMBB);
2084 SDValue CondLHS = getValue(CB.CmpLHS);
2099 Cond = DAG.
getSetCC(dl,
MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
2103 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
2104 const APInt&
High = cast<ConstantInt>(CB.CmpRHS)->getValue();
2106 SDValue CmpOp = getValue(CB.CmpMHS);
2109 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(
true)) {
2121 addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2124 if (CB.TrueBB != CB.FalseBB)
2125 addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
2130 if (CB.TrueBB == NextBlock(SwitchBB)) {
2152 assert(JT.Reg != -1U &&
"Should lower JT Header first!");
2166 JumpTableHeader &JTH,
2168 SDLoc dl = getCurSDLoc();
2173 SDValue SwitchOp = getValue(JTH.SValue);
2186 unsigned JumpTableReg =
2189 JumpTableReg, SwitchOp);
2190 JT.Reg = JumpTableReg;
2205 if (JT.MBB != NextBlock(SwitchBB))
2221 DAG.
getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
2250 SDLoc dl = getCurSDLoc();
2269 auto *Fn = cast<Function>(GuardCheck);
2271 assert(FnTy->getNumParams() == 1 &&
"Invalid function signature");
2275 Entry.
Node = GuardVal;
2276 Entry.
Ty = FnTy->getParamType(0);
2277 if (Fn->hasAttribute(1, Attribute::AttrKind::InReg))
2279 Args.push_back(Entry);
2284 .setCallee(Fn->getCallingConv(), FnTy->getReturnType(),
2285 getValue(GuardCheck), std::move(Args));
2287 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
2299 SDValue GuardPtr = getValue(IRGuard);
2341 None,
false, getCurSDLoc(),
false,
false).second;
2349 SDLoc dl = getCurSDLoc();
2352 SDValue SwitchOp = getValue(B.SValue);
2365 bool UsePtrType =
false;
2369 for (
unsigned i = 0, e = B.Cases.size(); i != e; ++i)
2383 B.Reg = FuncInfo.CreateReg(B.RegVT);
2388 addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
2389 addSuccessorWithProb(SwitchBB, MBB, B.Prob);
2397 if (MBB != NextBlock(SwitchBB))
2411 SDLoc dl = getCurSDLoc();
2417 if (PopCount == 1) {
2424 }
else if (PopCount == BB.Range) {
2444 addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
2446 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
2457 if (NextMBB != NextBlock(SwitchBB))
2464 void SelectionDAGBuilder::visitInvoke(
const InvokeInst &I) {
2475 {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
2476 "Cannot lower invokes with arbitrary operand bundles yet!");
2480 if (isa<InlineAsm>(
Callee))
2482 else if (Fn && Fn->isIntrinsic()) {
2483 switch (Fn->getIntrinsicID()) {
2491 visitPatchpoint(&I, EHPadBB);
2502 LowerCallSiteWithDeoptBundle(&I, getValue(
Callee), EHPadBB);
2504 LowerCallTo(&I, getValue(
Callee),
false, EHPadBB);
2512 CopyToExportRegsIfNeeded(&I);
2523 addSuccessorWithProb(InvokeMBB, Return);
2524 for (
auto &UnwindDest : UnwindDests) {
2525 UnwindDest.first->setIsEHPad();
2526 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
2536 void SelectionDAGBuilder::visitResume(
const ResumeInst &RI) {
2537 llvm_unreachable(
"SelectionDAGBuilder shouldn't visit resume instructions!");
2540 void SelectionDAGBuilder::visitLandingPad(
const LandingPadInst &LP) {
2541 assert(FuncInfo.MBB->isEHPad() &&
2542 "Call to landingpad not in landing pad!");
2547 const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
2548 if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
2549 TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
2560 SDLoc dl = getCurSDLoc();
2562 assert(ValueVTs.
size() == 2 &&
"Only two-valued landingpads are supported");
2567 if (FuncInfo.ExceptionPointerVirtReg) {
2570 FuncInfo.ExceptionPointerVirtReg,
2578 FuncInfo.ExceptionSelectorVirtReg,
2588 void SelectionDAGBuilder::sortAndRangeify(CaseClusterVector &Clusters) {
2590 for (
const CaseCluster &CC : Clusters)
2591 assert(CC.Low == CC.High &&
"Input clusters must be single-case");
2594 llvm::sort(Clusters, [](
const CaseCluster &a,
const CaseCluster &b) {
2595 return a.Low->getValue().slt(b.Low->getValue());
2599 const unsigned N = Clusters.size();
2600 unsigned DstIndex = 0;
2601 for (
unsigned SrcIndex = 0; SrcIndex <
N; ++SrcIndex) {
2602 CaseCluster &CC = Clusters[SrcIndex];
2606 if (DstIndex != 0 && Clusters[DstIndex - 1].MBB == Succ &&
2607 (CaseVal->
getValue() - Clusters[DstIndex - 1].High->getValue()) == 1) {
2610 Clusters[DstIndex - 1].High = CaseVal;
2611 Clusters[DstIndex - 1].Prob += CC.Prob;
2613 std::memmove(&Clusters[DstIndex++], &Clusters[SrcIndex],
2614 sizeof(Clusters[SrcIndex]));
2617 Clusters.resize(DstIndex);
2623 for (
unsigned i = 0, e = JTCases.size(); i != e; ++i)
2624 if (JTCases[i].
first.HeaderBB == First)
2625 JTCases[i].first.HeaderBB = Last;
2628 for (
unsigned i = 0, e = BitTestCases.size(); i != e; ++i)
2629 if (BitTestCases[i].Parent == First)
2630 BitTestCases[i].Parent = Last;
2633 void SelectionDAGBuilder::visitIndirectBr(
const IndirectBrInst &I) {
2640 bool Inserted = Done.
insert(BB).second;
2645 addSuccessorWithProb(IndirectBrMBB, Succ);
2654 void SelectionDAGBuilder::visitUnreachable(
const UnreachableInst &I) {
2661 if (&I != &BB.
front()) {
2664 if (
const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
2665 if (Call->doesNotReturn())
2674 void SelectionDAGBuilder::visitFSub(
const User &I) {
2699 case Instruction::Mul:
2700 case Instruction::And:
2701 case Instruction::Or:
2702 case Instruction::Xor:
2704 case Instruction::FAdd:
2705 case Instruction::FMul:
2706 if (
const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
2707 if (FPOp->getFastMathFlags().isFast())
2719 unsigned ElemNumToReduce = ElemNum;
2742 bool ReduxExtracted =
false;
2744 while (!UsersToVisit.empty()) {
2745 auto User = UsersToVisit.back();
2746 UsersToVisit.pop_back();
2755 if (Inst->
getOpcode() == OpCode || isa<PHINode>(U)) {
2756 if (
const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
2757 if (!isa<PHINode>(FPOp) && !FPOp->getFastMathFlags().isFast())
2759 UsersToVisit.push_back(U);
2761 dyn_cast<ShuffleVectorInst>(U)) {
2768 if (ResultElements < ElemNum)
2771 if (ElemNumToReduce == 1)
2773 if (!isa<UndefValue>(U->getOperand(1)))
2775 for (
unsigned i = 0; i < ElemNumToReduce / 2; ++i)
2776 if (ShufInst->getMaskValue(i) != int(i + ElemNumToReduce / 2))
2778 for (
unsigned i = ElemNumToReduce / 2; i < ElemNum; ++i)
2779 if (ShufInst->getMaskValue(i) != -1)
2784 if (!U->hasOneUse())
2788 if (!U2 || U2->getOpcode() != OpCode)
2792 if ((U2->getOperand(0) == U->getOperand(0) && U2->getOperand(1) == U) ||
2793 (U2->getOperand(1) == U->getOperand(0) && U2->getOperand(0) == U)) {
2794 UsersToVisit.push_back(U2);
2795 ElemNumToReduce /= 2;
2798 }
else if (isa<ExtractElementInst>(U)) {
2800 if (ElemNumToReduce != 1)
2804 if (!Val || !Val->
isZero())
2807 ReduxExtracted =
true;
2812 return ReduxExtracted;
2815 void SelectionDAGBuilder::visitUnary(
const User &I,
unsigned Opcode) {
2821 setValue(&I, UnNodeValue);
2824 void SelectionDAGBuilder::visitBinary(
const User &I,
unsigned Opcode) {
2826 if (
auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
2830 if (
auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I)) {
2831 Flags.
setExact(ExactOp->isExact());
2835 LLVM_DEBUG(
dbgs() <<
"Detected a reduction operation:" << I <<
"\n");
2842 setValue(&I, BinNodeValue);
2845 void SelectionDAGBuilder::visitShift(
const User &I,
unsigned Opcode) {
2855 unsigned Op2Size = Op2.getValueSizeInBits();
2856 SDLoc DL = getCurSDLoc();
2859 if (ShiftSize > Op2Size)
2866 else if (ShiftSize >=
Log2_32_Ceil(Op2.getValueSizeInBits()))
2881 dyn_cast<const OverflowingBinaryOperator>(&I)) {
2882 nuw = OFBinOp->hasNoUnsignedWrap();
2883 nsw = OFBinOp->hasNoSignedWrap();
2886 dyn_cast<const PossiblyExactOperator>(&I))
2887 exact = ExactOp->isExact();
2898 void SelectionDAGBuilder::visitSDiv(
const User &I) {
2903 Flags.
setExact(isa<PossiblyExactOperator>(&I) &&
2904 cast<PossiblyExactOperator>(&I)->isExact());
2909 void SelectionDAGBuilder::visitICmp(
const User &I) {
2911 if (
const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2912 predicate = IC->getPredicate();
2913 else if (
const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2921 setValue(&I, DAG.
getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
2924 void SelectionDAGBuilder::visitFCmp(
const User &I) {
2926 if (
const FCmpInst *
FC = dyn_cast<FCmpInst>(&I))
2927 predicate =
FC->getPredicate();
2935 if ((FPMO && FPMO->hasNoNaNs()) ||
TM.Options.NoNaNsFPMath)
2940 setValue(&I, DAG.
getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
2947 return isa<SelectInst>(V);
2951 void SelectionDAGBuilder::visitSelect(
const User &I) {
2955 unsigned NumValues = ValueVTs.
size();
2956 if (NumValues == 0)
return;
2962 auto BaseOps = {Cond};
2968 EVT VT = ValueVTs[0];
2975 VT != TLI.getTypeToTransformTo(Ctx, VT))
2976 VT = TLI.getTypeToTransformTo(Ctx, VT);
2981 bool UseScalarMinMax = VT.
isVector() &&
2987 switch (SPR.Flavor) {
2993 switch (SPR.NaNBehavior) {
3002 else if (UseScalarMinMax)
3010 switch (SPR.NaNBehavior) {
3020 else if (UseScalarMinMax)
3030 (TLI.isOperationLegalOrCustom(Opc, VT) ||
3038 LHSVal = getValue(LHS);
3039 RHSVal = getValue(RHS);
3044 for (
unsigned i = 0; i != NumValues; ++i) {
3047 Ops.push_back(
SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
3048 Values[i] = DAG.
getNode(OpCode, getCurSDLoc(),
3049 LHSVal.getNode()->getValueType(LHSVal.getResNo()+i),
3057 void SelectionDAGBuilder::visitTrunc(
const User &I) {
3065 void SelectionDAGBuilder::visitZExt(
const User &I) {
3074 void SelectionDAGBuilder::visitSExt(
const User &I) {
3083 void SelectionDAGBuilder::visitFPTrunc(
const User &I) {
3086 SDLoc dl = getCurSDLoc();
3094 void SelectionDAGBuilder::visitFPExt(
const User &I) {
3102 void SelectionDAGBuilder::visitFPToUI(
const User &I) {
3110 void SelectionDAGBuilder::visitFPToSI(
const User &I) {
3118 void SelectionDAGBuilder::visitUIToFP(
const User &I) {
3126 void SelectionDAGBuilder::visitSIToFP(
const User &I) {
3134 void SelectionDAGBuilder::visitPtrToInt(
const User &I) {
3143 void SelectionDAGBuilder::visitIntToPtr(
const User &I) {
3152 void SelectionDAGBuilder::visitBitCast(
const User &I) {
3154 SDLoc dl = getCurSDLoc();
3168 setValue(&I, DAG.
getConstant(
C->getValue(), dl, DestVT,
false,
3174 void SelectionDAGBuilder::visitAddrSpaceCast(
const User &I) {
3189 void SelectionDAGBuilder::visitInsertElement(
const User &I) {
3197 InVec, InVal, InIdx));
3200 void SelectionDAGBuilder::visitExtractElement(
const User &I) {
3210 void SelectionDAGBuilder::visitShuffleVector(
const User &I) {
3213 SDLoc DL = getCurSDLoc();
3217 unsigned MaskNumElts = Mask.
size();
3224 if (SrcNumElts == MaskNumElts) {
3230 if (SrcNumElts < MaskNumElts) {
3234 if (MaskNumElts % SrcNumElts == 0) {
3238 unsigned NumConcat = MaskNumElts / SrcNumElts;
3239 bool IsConcat =
true;
3241 for (
unsigned i = 0; i != MaskNumElts; ++i) {
3247 if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
3248 (ConcatSrcs[i / SrcNumElts] >= 0 &&
3249 ConcatSrcs[i / SrcNumElts] != (
int)(Idx / SrcNumElts))) {
3254 ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
3261 for (
auto Src : ConcatSrcs) {
3274 unsigned PaddedMaskNumElts =
alignTo(MaskNumElts, SrcNumElts);
3275 unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
3296 for (
unsigned i = 0; i != MaskNumElts; ++i) {
3298 if (Idx >= (
int)SrcNumElts)
3299 Idx -= SrcNumElts - PaddedMaskNumElts;
3307 if (MaskNumElts != PaddedMaskNumElts)
3312 setValue(&I, Result);
3316 if (SrcNumElts > MaskNumElts) {
3319 int StartIdx[2] = { -1, -1 };
3320 bool CanExtract =
true;
3321 for (
int Idx : Mask) {
3326 if (Idx >= (
int)SrcNumElts) {
3334 int NewStartIdx =
alignDown(Idx, MaskNumElts);
3335 if (NewStartIdx + MaskNumElts > SrcNumElts ||
3336 (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
3340 StartIdx[Input] = NewStartIdx;
3343 if (StartIdx[0] < 0 && StartIdx[1] < 0) {
3349 for (
unsigned Input = 0; Input < 2; ++Input) {
3350 SDValue &Src = Input == 0 ? Src1 : Src2;
3351 if (StartIdx[Input] < 0)
3363 for (
int &Idx : MappedOps) {
3364 if (Idx >= (
int)SrcNumElts)
3365 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
3381 for (
int Idx : Mask) {
3387 SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
3388 if (Idx >= (
int)SrcNumElts) Idx -= SrcNumElts;
3400 void SelectionDAGBuilder::visitInsertValue(
const User &I) {
3403 Indices = IV->getIndices();
3405 Indices = cast<ConstantExpr>(&
I)->getIndices();
3411 bool IntoUndef = isa<UndefValue>(Op0);
3412 bool FromUndef = isa<UndefValue>(Op1);
3422 unsigned NumAggValues = AggValueVTs.
size();
3423 unsigned NumValValues = ValValueVTs.size();
3427 if (!NumAggValues) {
3435 for (; i != LinearIndex; ++i)
3436 Values[i] = IntoUndef ? DAG.
getUNDEF(AggValueVTs[i]) :
3441 for (; i != LinearIndex + NumValValues; ++i)
3442 Values[i] = FromUndef ? DAG.
getUNDEF(AggValueVTs[i]) :
3446 for (; i != NumAggValues; ++i)
3447 Values[i] = IntoUndef ? DAG.
getUNDEF(AggValueVTs[i]) :
3454 void SelectionDAGBuilder::visitExtractValue(
const User &I) {
3457 Indices = EV->getIndices();
3459 Indices = cast<ConstantExpr>(&
I)->getIndices();
3464 bool OutOfUndef = isa<UndefValue>(Op0);
3472 unsigned NumValValues = ValValueVTs.
size();
3475 if (!NumValValues) {
3484 for (
unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
3485 Values[i - LinearIndex] =
3494 void SelectionDAGBuilder::visitGetElementPtr(
const User &I) {
3500 SDLoc dl = getCurSDLoc();
3505 cast<VectorType>(I.
getType())->getVectorNumElements() : 0;
3515 const Value *Idx = GTI.getOperand();
3516 if (
StructType *StTy = GTI.getStructTypeOrNull()) {
3517 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
3520 uint64_t
Offset = DL->getStructLayout(StTy)->getElementOffset(Field);
3525 if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds())
3534 APInt ElementSize(IdxSize, DL->getTypeAllocSize(GTI.getIndexedType()));
3539 if (!CI && isa<ConstantDataVector>(Idx) &&
3541 CI = cast<ConstantInt>(cast<ConstantDataVector>(Idx)->getSplatValue());
3548 SDValue OffsVal = VectorWidth ?
3576 if (ElementSize != 1) {
3578 unsigned Amt = ElementSize.
logBase2();
3597 void SelectionDAGBuilder::visitAlloca(
const AllocaInst &I) {
3600 if (FuncInfo.StaticAllocaMap.count(&I))
3603 SDLoc dl = getCurSDLoc();
3607 uint64_t TySize = DL.getTypeAllocSize(Ty);
3614 if (AllocSize.getValueType() != IntPtr)
3624 unsigned StackAlign =
3626 if (Align <= StackAlign)
3634 AllocSize = DAG.
getNode(
ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
3635 DAG.
getConstant(StackAlign - 1, dl, IntPtr), Flags);
3640 DAG.
getConstant(~(uint64_t)(StackAlign - 1), dl, IntPtr));
3648 assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
3651 void SelectionDAGBuilder::visitLoad(
const LoadInst &I) {
3653 return visitAtomicLoad(I);
3660 if (
const Argument *
Arg = dyn_cast<Argument>(SV)) {
3661 if (
Arg->hasSwiftErrorAttr())
3662 return visitLoadFromSwiftError(I);
3665 if (
const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
3666 if (Alloca->isSwiftError())
3667 return visitLoadFromSwiftError(I);
3688 unsigned NumValues = ValueVTs.size();
3693 bool ConstantMemory =
false;
3694 if (isVolatile || NumValues > MaxParallelChains)
3704 ConstantMemory =
true;
3710 SDLoc dl = getCurSDLoc();
3723 unsigned ChainI = 0;
3724 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
3731 if (ChainI == MaxParallelChains) {
3732 assert(PendingLoads.empty() &&
"PendingLoads must be serialized first");
3749 if (isDereferenceable)
3755 MMOFlags, AAInfo, Ranges);
3761 if (!ConstantMemory) {
3767 PendingLoads.push_back(Chain);
3774 void SelectionDAGBuilder::visitStoreToSwiftError(
const StoreInst &I) {
3776 "call visitStoreToSwiftError when backend supports swifterror");
3783 assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
3784 "expect a single EVT for swifterror");
3788 unsigned VReg;
bool CreatedVReg;
3789 std::tie(VReg, CreatedVReg) = FuncInfo.getOrCreateSwiftErrorVRegDefAt(&I);
3793 SDValue(Src.getNode(), Src.getResNo()));
3796 FuncInfo.setCurrentSwiftErrorVReg(FuncInfo.MBB, I.
getOperand(1), VReg);
3799 void SelectionDAGBuilder::visitLoadFromSwiftError(
const LoadInst &I) {
3801 "call visitLoadFromSwiftError when backend supports swifterror");
3806 "Support volatile, non temporal, invariant for load_from_swift_error");
3817 "load_from_swift_error should not be constant memory");
3823 assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
3824 "expect a single EVT for swifterror");
3828 getRoot(), getCurSDLoc(),
3829 FuncInfo.getOrCreateSwiftErrorVRegUseAt(&I, FuncInfo.MBB, SV).first,
3835 void SelectionDAGBuilder::visitStore(
const StoreInst &I) {
3837 return visitAtomicStore(I);
3846 if (
const Argument *
Arg = dyn_cast<Argument>(PtrV)) {
3847 if (
Arg->hasSwiftErrorAttr())
3848 return visitStoreToSwiftError(I);
3851 if (
const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
3852 if (Alloca->isSwiftError())
3853 return visitStoreToSwiftError(I);
3861 unsigned NumValues = ValueVTs.
size();
3873 SDLoc dl = getCurSDLoc();
3891 unsigned ChainI = 0;
3892 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
3894 if (ChainI == MaxParallelChains) {
3905 Chains[ChainI] = St;
3913 void SelectionDAGBuilder::visitMaskedStore(
const CallInst &I,
3914 bool IsCompressing) {
3915 SDLoc sdl = getCurSDLoc();
3918 unsigned& Alignment) {
3922 Alignment = cast<ConstantInt>(I.
getArgOperand(2))->getZExtValue();
3926 unsigned& Alignment) {
3934 Value *PtrOperand, *MaskOperand, *Src0Operand;
3937 getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
3939 getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
3941 SDValue Ptr = getValue(PtrOperand);
3942 SDValue Src0 = getValue(Src0Operand);
3943 SDValue Mask = getValue(MaskOperand);
3961 setValue(&I, StoreNode);
3989 const Value *GEPPtr = GEP->getPointerOperand();
3995 unsigned FinalIndex = GEP->getNumOperands() - 1;
3996 Value *IndexVal = GEP->getOperand(FinalIndex);
3999 for (
unsigned i = 1; i < FinalIndex; ++i) {
4001 if (!
C || !
C->isZero())
4018 unsigned GEPWidth = GEP->getType()->getVectorNumElements();
4025 void SelectionDAGBuilder::visitMaskedScatter(
const CallInst &I) {
4026 SDLoc sdl = getCurSDLoc();
4033 unsigned Alignment = (cast<ConstantInt>(I.
getArgOperand(2)))->getZExtValue();
4044 const Value *BasePtr = Ptr;
4045 bool UniformBase =
getUniformBase(BasePtr, Base, Index, Scale,
this);
4047 const Value *MemOpBasePtr = UniformBase ? BasePtr :
nullptr;
4054 Index = getValue(Ptr);
4061 setValue(&I, Scatter);
4064 void SelectionDAGBuilder::visitMaskedLoad(
const CallInst &I,
bool IsExpanding) {
4065 SDLoc sdl = getCurSDLoc();
4068 unsigned& Alignment) {
4071 Alignment = cast<ConstantInt>(I.
getArgOperand(1))->getZExtValue();
4076 unsigned& Alignment) {
4084 Value *PtrOperand, *MaskOperand, *Src0Operand;
4087 getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4089 getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4091 SDValue Ptr = getValue(PtrOperand);
4092 SDValue Src0 = getValue(Src0Operand);
4093 SDValue Mask = getValue(MaskOperand);
4116 Alignment, AAInfo, Ranges);
4121 PendingLoads.push_back(Load.getValue(1));
4125 void SelectionDAGBuilder::visitMaskedGather(
const CallInst &I) {
4126 SDLoc sdl = getCurSDLoc();
4135 unsigned Alignment = (cast<ConstantInt>(I.
getArgOperand(1)))->getZExtValue();
4147 const Value *BasePtr = Ptr;
4148 bool UniformBase =
getUniformBase(BasePtr, Base, Index, Scale,
this);
4149 bool ConstantMemory =
false;
4150 if (UniformBase && AA &&
4151 AA->pointsToConstantMemory(
4158 ConstantMemory =
true;
4165 Alignment, AAInfo, Ranges);
4169 Index = getValue(Ptr);
4177 if (!ConstantMemory)
4178 PendingLoads.push_back(OutChain);
4179 setValue(&I, Gather);
4183 SDLoc dl = getCurSDLoc();
4196 0, SuccessOrder, FailureOrder, SSID);
4204 void SelectionDAGBuilder::visitAtomicRMW(
const AtomicRMWInst &I) {
4205 SDLoc dl = getCurSDLoc();
4241 void SelectionDAGBuilder::visitFence(
const FenceInst &I) {
4242 SDLoc dl = getCurSDLoc();
4253 void SelectionDAGBuilder::visitAtomicLoad(
const LoadInst &I) {
4254 SDLoc dl = getCurSDLoc();
4288 void SelectionDAGBuilder::visitAtomicStore(
const StoreInst &I) {
4289 SDLoc dl = getCurSDLoc();
4316 void SelectionDAGBuilder::visitTargetIntrinsic(
const CallInst &I,
4317 unsigned Intrinsic) {
4337 TargetLowering::IntrinsicInfo
Info;
4365 if (IsTgtIntrinsic) {
4370 Info.flags, Info.size);
4371 }
else if (!HasChain) {
4382 PendingLoads.push_back(Chain);
4392 Result = lowerRangeToAssertZExt(DAG, I, Result);
4394 setValue(&I, Result);
4453 SDValue TwoToFractionalPartOfX;
4454 if (LimitFloatPrecision <= 6) {
4469 }
else if (LimitFloatPrecision <= 12) {
4531 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4556 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4569 if (LimitFloatPrecision <= 6) {
4584 }
else if (LimitFloatPrecision <= 12) {
4654 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4667 if (LimitFloatPrecision <= 6) {
4680 }
else if (LimitFloatPrecision <= 12) {
4751 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4764 if (LimitFloatPrecision <= 6) {
4779 }
else if (LimitFloatPrecision <= 12) {
4839 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
4850 bool IsExp10 =
false;
4852 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4855 IsExp10 = LHSC->isExactlyValue(Ten);
4884 unsigned Val = RHSC->getSExtValue();
4885 if ((
int)Val < 0) Val = -Val;
4913 CurSquare, CurSquare);
4918 if (RHSC->getSExtValue() < 0)
4948 bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
4958 bool IsIndirect =
false;
4961 int FI = FuncInfo.getArgumentFrameIndex(Arg);
4975 IsIndirect = IsDbgDeclare;
4983 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
4989 if (VMI != FuncInfo.ValueMap.
end()) {
4993 if (RFV.occupiesMultipleRegs()) {
4995 for (
auto RegAndSize : RFV.getRegsAndSizes()) {
4998 Expr, Offset, RegAndSize.second);
5001 FuncInfo.ArgDbgValues.push_back(
5002 BuildMI(MF, DL, TII->
get(TargetOpcode::DBG_VALUE), IsDbgDeclare,
5003 Op->
getReg(), Variable, *FragmentExpr));
5004 Offset += RegAndSize.second;
5009 IsIndirect = IsDbgDeclare;
5017 "Expected inlined-at fields to agree");
5018 IsIndirect = (Op->
isReg()) ? IsIndirect :
true;
5019 FuncInfo.ArgDbgValues.push_back(
5020 BuildMI(MF, DL, TII->
get(TargetOpcode::DBG_VALUE), IsIndirect,
5021 *
Op, Variable, Expr));
5031 unsigned DbgSDNodeOrder) {
5032 if (
auto *FISDN = dyn_cast<FrameIndexSDNode>(N.
getNode())) {
5044 false, dl, DbgSDNodeOrder);
5047 false, dl, DbgSDNodeOrder);
5051 #if defined(_MSC_VER) && defined(setjmp) && \ 5052 !defined(setjmp_undefined_for_msvc) 5053 # pragma push_macro("setjmp") 5055 # define setjmp_undefined_for_msvc 5062 SelectionDAGBuilder::visitIntrinsicCall(
const CallInst &I,
unsigned Intrinsic) {
5064 SDLoc sdl = getCurSDLoc();
5068 switch (Intrinsic) {
5071 visitTargetIntrinsic(I, Intrinsic);
5098 DAG.
getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
5111 DAG.
getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
5113 RegName, getValue(RegValue)));
5121 const auto &MCI = cast<MemCpyInst>(
I);
5126 unsigned DstAlign = std::max<unsigned>(MCI.getDestAlignment(), 1);
5127 unsigned SrcAlign = std::max<unsigned>(MCI.getSourceAlignment(), 1);
5129 bool isVol = MCI.isVolatile();
5137 updateDAGForMaybeTailCall(MC);
5141 const auto &MSI = cast<MemSetInst>(
I);
5146 unsigned Align = std::max<unsigned>(MSI.getDestAlignment(), 1);
5147 bool isVol = MSI.isVolatile();
5151 updateDAGForMaybeTailCall(MS);
5155 const auto &MMI = cast<MemMoveInst>(
I);
5160 unsigned DstAlign = std::max<unsigned>(MMI.getDestAlignment(), 1);
5161 unsigned SrcAlign = std::max<unsigned>(MMI.getSourceAlignment(), 1);
5163 bool isVol = MMI.isVolatile();
5170 updateDAGForMaybeTailCall(MM);
5185 SrcAlign, Length, LengthTy, ElemSz, isTC,
5188 updateDAGForMaybeTailCall(MC);
5192 auto &
MI = cast<AtomicMemMoveInst>(
I);
5193 SDValue Dst = getValue(
MI.getRawDest());
5194 SDValue Src = getValue(
MI.getRawSource());
5195 SDValue Length = getValue(
MI.getLength());
5197 unsigned DstAlign =
MI.getDestAlignment();
5198 unsigned SrcAlign =
MI.getSourceAlignment();
5199 Type *LengthTy =
MI.getLength()->getType();
5200 unsigned ElemSz =
MI.getElementSizeInBytes();
5203 SrcAlign, Length, LengthTy, ElemSz, isTC,
5206 updateDAGForMaybeTailCall(MC);
5210 auto &
MI = cast<AtomicMemSetInst>(
I);
5211 SDValue Dst = getValue(
MI.getRawDest());
5213 SDValue Length = getValue(
MI.getLength());
5215 unsigned DstAlign =
MI.getDestAlignment();
5216 Type *LengthTy =
MI.getLength()->getType();
5217 unsigned ElemSz =
MI.getElementSizeInBytes();
5220 LengthTy, ElemSz, isTC,
5222 updateDAGForMaybeTailCall(MC);
5227 const auto &DI = cast<DbgVariableIntrinsic>(
I);
5230 dropDanglingDebugInfo(Variable, Expression);
5231 assert(Variable &&
"Missing variable");
5235 if (!Address || isa<UndefValue>(Address) ||
5241 bool isParameter = Variable->isParameter() || isa<Argument>(
Address);
5246 if (
const auto *AI =
5248 if (AI->isStaticAlloca()) {
5249 auto I = FuncInfo.StaticAllocaMap.find(AI);
5250 if (I != FuncInfo.StaticAllocaMap.end())
5253 }
else if (
const auto *
Arg = dyn_cast<Argument>(
5255 FI = FuncInfo.getArgumentFrameIndex(
Arg);
5264 Variable, Expression, FI,
true, dl, SDNodeOrder);
5265 DAG.
AddDbgValue(SDV, getRoot().getNode(), isParameter);
5273 N = UnusedArgNodeMap[
Address];
5276 if (
const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
5277 Address = BCI->getOperand(0);
5280 if (isParameter && FINode) {
5284 true, dl, SDNodeOrder);
5285 }
else if (isa<Argument>(Address)) {
5288 EmitFuncArgumentDbgValue(Address, Variable, Expression, dl,
true, N);
5292 true, dl, SDNodeOrder);
5298 if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl,
true,
5308 assert(Label &&
"Missing label");
5321 dropDanglingDebugInfo(Variable, Expression);
5327 if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
5328 isa<ConstantPointerNull>(V)) {
5337 if (!N.
getNode() && isa<Argument>(V))
5338 N = UnusedArgNodeMap[V];
5340 if (EmitFuncArgumentDbgValue(V, Variable, Expression, dl,
false, N))
5342 SDV = getDbgValue(N, Variable, Expression, dl, SDNodeOrder);
5349 if (isa<PHINode>(V)) {
5350 auto VMI = FuncInfo.ValueMap.find(V);
5351 if (VMI != FuncInfo.ValueMap.end()) {
5352 unsigned Reg = VMI->second;
5357 if (RFV.occupiesMultipleRegs()) {
5359 unsigned BitsToDescribe = 0;
5360 if (
auto VarSize = Variable->getSizeInBits())
5361 BitsToDescribe = *VarSize;
5363 BitsToDescribe = Fragment->SizeInBits;
5364 for (
auto RegAndSize : RFV.getRegsAndSizes()) {
5365 unsigned RegisterSize = RegAndSize.second;
5367 if (Offset >= BitsToDescribe)
5369 unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
5370 ? BitsToDescribe - Offset
5373 Expression, Offset, FragmentSize);
5377 false, dl, SDNodeOrder);
5379 Offset += RegisterSize;
5399 DanglingDebugInfoMap[V].emplace_back(&DI, dl, SDNodeOrder);
5403 LLVM_DEBUG(
dbgs() <<
"Dropping debug location info for:\n " << DI <<
"\n");
5437 assert(CI &&
"Non-constant call site value in eh.sjlj.callsite!");
5448 int FI = FuncInfo.StaticAllocaMap[FnCtx];
5458 setValue(&I, Op.getValue(0));
5471 visitMaskedGather(I);
5477 visitMaskedScatter(I);
5480 visitMaskedStore(I);
5483 visitMaskedLoad(I,
true );
5486 visitMaskedStore(I,
true );
5497 if (isa<ConstantSDNode>(ShAmt)) {
5498 visitTargetIntrinsic(I, Intrinsic);
5501 unsigned NewIntrinsic = 0;
5503 switch (Intrinsic) {
5582 switch (Intrinsic) {
5597 setValue(&I, DAG.
getNode(Opcode, sdl,
5608 setValue(&I, DAG.
getNode(Opc, sdl, VT,
5619 setValue(&I, DAG.
getNode(Opc, sdl, VT,
5673 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
5769 setValue(&I, DAG.
getNode(FunnelOpcode, sdl, VT, X, Y, Z));
5779 setValue(&I, DAG.
getNode(RotateOpcode, sdl, VT, X, Z));
5788 setValue(&I, DAG.
getNode(RotateOpcode, sdl, VT, X, NegShAmt));
5818 setValue(&I, DAG.
getSelect(sdl, VT, IsZeroShift, IsFSHL ? X : Y, Or));
5890 unsigned Align = DL->getPrefTypeAlignment(Global->
getType());
5891 Res = DAG.
getLoad(PtrTy, sdl, Chain, getValue(Global),
5906 SDValue Src, Chain = getRoot();
5915 int FI = FuncInfo.StaticAllocaMap[Slot];
5932 assert(CI &&
"Non-constant type in __builtin_object_size?");
6000 "only valid in functions with gc specified, enforced by Verifier");
6001 assert(GFI &&
"implied by previous");
6006 GFI->addStackRoot(FI->getIndex(), TypeMap);
6027 if (TrapFuncName.
empty()) {
6042 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
6054 switch (Intrinsic) {
6067 setValue(&I, DAG.
getNode(Op, sdl, VTs, Op1, Op2));
6072 unsigned rw = cast<ConstantInt>(I.
getArgOperand(1))->getZExtValue();
6088 PendingLoads.push_back(Result);
6104 E = Allocas.
end(); Object !=
E; ++Object) {
6105 AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object);
6108 if (!LifetimeObject)
6113 auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
6114 if (
SI == FuncInfo.StaticAllocaMap.end())
6117 int FI =
SI->second;
6147 visitPatchpoint(&I);
6153 visitGCResult(cast<GCResultInst>(I));
6156 visitGCRelocate(cast<GCRelocateInst>(I));
6170 if (isa<ConstantPointerNull>(Arg))
6173 assert(FuncInfo.StaticAllocaMap.count(Slot) &&
6174 "can only escape static allocas");
6175 int FI = FuncInfo.StaticAllocaMap[Slot];
6179 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
6180 TII->
get(TargetOpcode::LOCAL_ESCAPE))
6181 .addSym(FrameAllocSym)
6225 unsigned VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC);
6241 SDLoc DL = getCurSDLoc();
6261 setValue(&I, patchableNode);
6272 SDLoc DL = getCurSDLoc();
6293 TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, DL, NodeTys, Ops);
6296 setValue(&I, patchableNode);
6300 LowerDeoptimizeCall(&I);
6316 visitVectorReduce(I, Intrinsic);
6329 "llvm.icall.branch.funnel operand must be a GlobalValue");
6332 struct BranchFunnelTarget {
6341 if (ElemBase !=
Base)
6343 "to the same GlobalValue");
6349 "llvm.icall.branch.funnel operand must be a GlobalValue");
6351 GA->getGlobal(), getCurSDLoc(),
6355 [](
const BranchFunnelTarget &
T1,
const BranchFunnelTarget &T2) {
6356 return T1.Offset < T2.Offset;
6359 for (
auto &
T : Targets) {
6381 void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
6383 SDLoc sdl = getCurSDLoc();
6469 Result = DAG.
getNode(Opcode, sdl, VTs,
6470 { Chain, getValue(FPI.getArgOperand(0)) });
6472 Result = DAG.
getNode(Opcode, sdl, VTs,
6473 { Chain, getValue(FPI.getArgOperand(0)),
6474 getValue(FPI.getArgOperand(1)),
6475 getValue(FPI.getArgOperand(2)) });
6477 Result = DAG.
getNode(Opcode, sdl, VTs,
6485 setValue(&FPI, FPResult);
6488 std::pair<SDValue, SDValue>
6503 if (CallSiteIndex) {
6505 LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex);
6519 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
6522 "Non-null chain expected with non-tail call!");
6523 assert((Result.second.getNode() || !Result.first.getNode()) &&
6524 "Null value expected with tail call!");
6526 if (!Result.second.getNode()) {
6533 PendingExports.clear();
6552 BeginLabel, EndLabel);
6554 MF.
addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel);
6571 const Value *SwiftErrorVal =
nullptr;
6585 const Value *V = *i;
6591 SDValue ArgNode = getValue(V);
6602 .getOrCreateSwiftErrorVRegUseAt(
6608 Args.push_back(Entry);
6612 if (Entry.
IsSRet && isa<Instruction>(V))
6629 .
setCallee(RetTy, FTy, Callee, std::move(Args), CS)
6632 std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
6634 if (Result.first.getNode()) {
6636 Result.first = lowerRangeToAssertZExt(DAG, *Inst, Result.first);
6637 setValue(Inst, Result.first);
6646 unsigned VReg;
bool CreatedVReg;
6647 std::tie(VReg, CreatedVReg) =
6652 FuncInfo.setCurrentSwiftErrorVReg(FuncInfo.MBB, SwiftErrorVal, VReg);
6661 if (
const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
6672 const_cast<Constant *>(LoadInput), LoadTy, *Builder.
DL))
6679 bool ConstantMemory =
false;
6684 ConstantMemory =
true;
6695 if (!ConstantMemory)
6702 void SelectionDAGBuilder::processIntegerCallValue(
const Instruction &I,
6711 setValue(&I, Value);
6719 bool SelectionDAGBuilder::visitMemCmpCall(
const CallInst &I) {
6726 setValue(&I, DAG.
getConstant(0, getCurSDLoc(), CallVT));
6732 DAG, getCurSDLoc(), DAG.
getRoot(), getValue(LHS), getValue(RHS),
6734 if (Res.first.getNode()) {
6735 processIntegerCallValue(I, Res.first,
true);
6736 PendingLoads.push_back(Res.second);
6749 auto hasFastLoadsAndCompare = [&](
unsigned NumBits) {
6757 unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
6772 switch (NumBitsToCompare) {
6784 LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
6802 processIntegerCallValue(I, Cmp,
false);
6811 bool SelectionDAGBuilder::visitMemChrCall(
const CallInst &I) {
6817 std::pair<SDValue, SDValue> Res =
6819 getValue(Src), getValue(Char), getValue(Length),
6821 if (Res.first.getNode()) {
6822 setValue(&I, Res.first);
6823 PendingLoads.push_back(Res.second);
6835 bool SelectionDAGBuilder::visitMemPCpyCall(
const CallInst &I) {
6842 unsigned Align = std::min(DstAlign, SrcAlign);
6847 SDLoc sdl = getCurSDLoc();
6857 "** memcpy should not be lowered as TailCall in mempcpy context **");
6866 setValue(&I, DstPlusSize);
6875 bool SelectionDAGBuilder::visitStrCpyCall(
const CallInst &I,
bool isStpcpy) {
6879 std::pair<SDValue, SDValue> Res =
6881 getValue(Arg0), getValue(Arg1),
6884 if (Res.first.getNode()) {
6885 setValue(&I, Res.first);
6898 bool SelectionDAGBuilder::visitStrCmpCall(
const CallInst &I) {
6902 std::pair<SDValue, SDValue> Res =
6904 getValue(Arg0), getValue(Arg1),
6907 if (Res.first.getNode()) {
6908 processIntegerCallValue(I, Res.first,
true);
6909 PendingLoads.push_back(Res.second);
6921 bool SelectionDAGBuilder::visitStrLenCall(
const CallInst &I) {
6925 std::pair<SDValue, SDValue> Res =
6928 if (Res.first.getNode()) {
6929 processIntegerCallValue(I, Res.first,
false);
6930 PendingLoads.push_back(Res.second);
6942 bool SelectionDAGBuilder::visitStrNLenCall(
const CallInst &I) {
6946 std::pair<SDValue, SDValue> Res =
6948 getValue(Arg0), getValue(Arg1),
6950 if (Res.first.getNode()) {
6951 processIntegerCallValue(I, Res.first,
false);
6952 PendingLoads.push_back(Res.second);
6964 bool SelectionDAGBuilder::visitUnaryFloatCall(
const CallInst &I,
6980 bool SelectionDAGBuilder::visitBinaryFloatCall(
const CallInst &I,
6989 setValue(&I, DAG.
getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1));
6993 void SelectionDAGBuilder::visitCall(
const CallInst &I) {
7003 const char *RenameFn =
nullptr;
7005 if (
F->isDeclaration()) {
7007 unsigned IID =
F->getIntrinsicID();
7010 IID = II->getIntrinsicID(
F);
7013 RenameFn = visitIntrinsicCall(I, IID);
7024 F->hasName() && LibInfo->getLibFunc(*
F, Func) &&
7025 LibInfo->hasOptimizedCodeGen(Func)) {
7028 case LibFunc_copysign:
7029 case LibFunc_copysignf:
7030 case LibFunc_copysignl:
7074 case LibFunc_sqrt_finite:
7075 case LibFunc_sqrtf_finite:
7076 case LibFunc_sqrtl_finite:
7081 case LibFunc_floorf:
7082 case LibFunc_floorl:
7086 case LibFunc_nearbyint:
7087 case LibFunc_nearbyintf:
7088 case LibFunc_nearbyintl:
7105 case LibFunc_roundf:
7106 case LibFunc_roundl:
7111 case LibFunc_truncf:
7112 case LibFunc_truncl:
7128 case LibFunc_memcmp:
7129 if (visitMemCmpCall(I))
7132 case LibFunc_mempcpy:
7133 if (visitMemPCpyCall(I))
7136 case LibFunc_memchr:
7137 if (visitMemChrCall(I))
7140 case LibFunc_strcpy:
7141 if (visitStrCpyCall(I,
false))
7144 case LibFunc_stpcpy:
7145 if (visitStrCpyCall(I,
true))
7148 case LibFunc_strcmp:
7149 if (visitStrCmpCall(I))
7152 case LibFunc_strlen:
7153 if (visitStrLenCall(I))
7156 case LibFunc_strnlen:
7157 if (visitStrNLenCall(I))
7175 {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
7176 "Cannot lower calls with arbitrary operand bundles!");
7179 LowerCallSiteWithDeoptBundle(&I, Callee,
nullptr);
7212 for (
const auto &Code : Codes)
7226 if (isa<BasicBlock>(CallOperandVal))
7229 llvm::Type *OpTy = CallOperandVal->getType();
7242 if (
StructType *STy = dyn_cast<StructType>(OpTy))
7243 if (STy->getNumElements() == 1)
7244 OpTy = STy->getElementType(0);
7275 SDISelAsmOperandInfo &MatchingOpInfo,
7277 if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
7283 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
7285 OpInfo.ConstraintVT);
7286 std::pair<unsigned, const TargetRegisterClass *> InputRC =
7287 TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
7288 MatchingOpInfo.ConstraintVT);
7289 if ((OpInfo.ConstraintVT.isInteger() !=
7290 MatchingOpInfo.ConstraintVT.isInteger()) ||
7291 (MatchRC.second != InputRC.second)) {
7294 " with a matching output constraint of" 7295 " incompatible type!");
7297 MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
7304 SDISelAsmOperandInfo &OpInfo,
7317 const Value *OpVal = OpInfo.CallOperandVal;
7318 if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
7319 isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
7328 uint64_t TySize = DL.getTypeAllocSize(Ty);
7329 unsigned Align = DL.getPrefTypeAlignment(Ty);
7333 Chain = DAG.
getStore(Chain, Location, OpInfo.CallOperand, StackSlot,
7335 OpInfo.CallOperand = StackSlot;
7349 SDISelAsmOperandInfo &OpInfo,
7350 SDISelAsmOperandInfo &RefOpInfo) {
7360 unsigned AssignedReg;
7363 &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
7387 if (RegVT.
getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
7392 OpInfo.CallOperand =
7394 OpInfo.ConstraintVT = RegVT;
7398 }
else if (RegVT.
isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
7401 OpInfo.CallOperand =
7403 OpInfo.ConstraintVT = VT;
7410 if (OpInfo.isMatchingInputConstraint())
7413 EVT ValueVT = OpInfo.ConstraintVT;
7418 unsigned NumRegs = 1;
7434 for (; *I != AssignedReg; ++
I)
7435 assert(I != RC->
end() &&
"AssignedReg should be member of RC");
7438 for (; NumRegs; --NumRegs, ++
I) {
7439 assert(I != RC->
end() &&
"Ran out of registers to allocate!");
7444 OpInfo.AssignedRegs =
RegsForValue(Regs, RegVT, ValueVT);
7449 const std::vector<SDValue> &AsmNodeOperands) {
7452 for (; OperandNo; --OperandNo) {
7455 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
7459 "Skipped past definitions?");
7498 unsigned get()
const {
return Flags; }
7508 SDISelAsmOperandInfoVector ConstraintOperands;
7514 bool hasMemory =
false;
7517 ExtraFlags ExtraInfo(CS);
7521 for (
auto &
T : TargetConstraints) {
7522 ConstraintOperands.push_back(SDISelAsmOperandInfo(
T));
7523 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
7528 OpInfo.CallOperandVal =
const_cast<Value *
>(CS.
getArgument(ArgNo++));
7532 if (
const BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
7533 OpInfo.CallOperand = DAG.
getBasicBlock(FuncInfo.MBBMap[BB]);
7535 OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
7538 OpInfo.ConstraintVT =
7550 assert(ResNo == 0 &&
"Asm only has one result!");
7551 OpInfo.ConstraintVT =
7560 hasMemory = OpInfo.hasMemory(TLI);
7568 ExtraInfo.update(
T);
7582 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
7587 if (OpInfo.hasMatchingInput()) {
7588 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
7602 !OpInfo.isIndirect) {
7603 assert((OpInfo.isMultipleAlternative ||
7605 "Can only indirectify direct input operands!");
7611 OpInfo.CallOperandVal =
nullptr;
7614 OpInfo.isIndirect =
true;
7619 SDISelAsmOperandInfo &RefOpInfo =
7620 OpInfo.isMatchingInputConstraint()
7621 ? ConstraintOperands[OpInfo.getMatchedOperand()]
7629 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
7630 SDISelAsmOperandInfo &RefOpInfo =
7631 OpInfo.isMatchingInputConstraint()
7632 ? ConstraintOperands[OpInfo.getMatchedOperand()]
7642 std::vector<SDValue> AsmNodeOperands;
7643 AsmNodeOperands.push_back(
SDValue());
7651 AsmNodeOperands.push_back(DAG.
getMDNode(SrcLoc));
7663 std::vector<std::pair<RegsForValue, Value *>> IndirectStoresToEmit;
7665 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
7666 switch (OpInfo.Type) {
7671 assert(OpInfo.isIndirect &&
"Memory output must be indirect operand");
7673 unsigned ConstraintID =
7676 "Failed to convert memory constraint code to constraint id.");
7683 AsmNodeOperands.push_back(OpInfo.CallOperand);
7691 if (OpInfo.AssignedRegs.Regs.empty()) {
7693 CS,
"couldn't allocate output register for constraint '" +
7694 Twine(OpInfo.ConstraintCode) +
"'");
7700 if (OpInfo.isIndirect) {
7701 IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
7702 OpInfo.CallOperandVal));
7707 RetValRegs.
append(OpInfo.AssignedRegs);
7713 .AddInlineAsmOperands(OpInfo.isEarlyClobber
7716 false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
7720 SDValue InOperandVal = OpInfo.CallOperand;
7722 if (OpInfo.isMatchingInputConstraint()) {
7728 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
7732 if (OpInfo.isIndirect) {
7734 emitInlineAsmError(CS,
"inline asm not supported yet:" 7735 " don't know how to handle tied " 7736 "indirect register inputs");
7740 MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
7747 for (
unsigned i = 0; i != NumRegs; ++i)
7750 emitInlineAsmError(CS,
"inline asm error: This value type register " 7751 "class is not natively supported!");
7757 SDLoc dl = getCurSDLoc();
7759 MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Flag,
7762 true, OpInfo.getMatchedOperand(), dl,
7763 DAG, AsmNodeOperands);
7769 "Unexpected number of operands");
7774 OpInfo.getMatchedOperand());
7777 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
7787 std::vector<SDValue> Ops;
7791 emitInlineAsmError(CS,
"invalid operand for inline asm constraint '" +
7792 Twine(OpInfo.ConstraintCode) +
"'");
7797 unsigned ResOpType =
7801 AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
7806 assert(OpInfo.isIndirect &&
"Operand must be indirect to be a mem!");
7809 "Memory operands expect pointer values");
7811 unsigned ConstraintID =
7814 "Failed to convert memory constraint code to constraint id.");
7822 AsmNodeOperands.push_back(InOperandVal);
7828 "Unknown constraint type!");
7831 if (OpInfo.isIndirect) {
7833 CS,
"Don't know how to handle indirect register inputs yet " 7834 "for constraint '" +
7835 Twine(OpInfo.ConstraintCode) +
"'");
7840 if (OpInfo.AssignedRegs.Regs.empty()) {
7841 emitInlineAsmError(CS,
"couldn't allocate input reg for constraint '" +
7842 Twine(OpInfo.ConstraintCode) +
"'");
7846 SDLoc dl = getCurSDLoc();
7848 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl,
7852 dl, DAG, AsmNodeOperands);
7858 if (!OpInfo.AssignedRegs.Regs.empty())
7860 false, 0, getCurSDLoc(), DAG,
7868 if (Flag.
getNode()) AsmNodeOperands.push_back(Flag);
7884 if (
StructType *StructResult = dyn_cast<StructType>(CSResultType)) {
7885 numRet = StructResult->getNumElements();
7887 "Mismatch in number of output operands in asm result");
7888 ResultTypes = StructResult->elements();
7890 ResultValues.
resize(numRet);
7895 ResultValues[0] = Val;
7899 for (
unsigned i = 0; i < numRet; i++) {
7901 SDValue Val = ResultValues[i];
7902 assert(ResultTypes[i]->isSized() &&
"Unexpected unsized type");
7925 ResultVTs[i] = ResultVT;
7926 ResultValues[i] = Val;
7930 DAG.
getVTList(ResultVTs), ResultValues);
7933 if (!IA->
hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())
7937 std::vector<std::pair<SDValue, const Value *>> StoresToEmit;
7941 for (
unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
7943 const Value *Ptr = IndirectStoresToEmit[i].second;
7946 StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
7951 for (
unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) {
7953 getValue(StoresToEmit[i].
second),
7958 if (!OutChains.
empty())
7965 const Twine &Message) {
7974 if (ValueVTs.
empty())
7978 for (
unsigned i = 0, e = ValueVTs.
size(); i != e; ++i)
7984 void SelectionDAGBuilder::visitVAStart(
const CallInst &I) {
7991 void SelectionDAGBuilder::visitVAArg(
const VAArgInst &I) {
7995 getCurSDLoc(), getRoot(), getValue(I.
getOperand(0)),
8002 void SelectionDAGBuilder::visitVAEnd(
const CallInst &I) {
8009 void SelectionDAGBuilder::visitVACopy(
const CallInst &I) {
8039 SDLoc SL = getCurSDLoc();
8050 for (
unsigned I = 1; I != NumVals; ++
I)
8064 unsigned ArgIdx,
unsigned NumArgs,
SDValue Callee,
Type *ReturnTy,
8065 bool IsPatchPoint) {
8067 Args.reserve(NumArgs);
8071 for (
unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
8072 ArgI != ArgE; ++ArgI) {
8073 const Value *V = CS->getOperand(ArgI);
8078 Entry.
Node = getValue(V);
8080 Entry.setAttributes(&CS, ArgI);
8081 Args.push_back(Entry);
8087 .setDiscardResult(CS->use_empty())
8088 .setIsPatchPoint(IsPatchPoint);
8111 for (
unsigned i = StartIdx, e = CS.
arg_size(); i != e; ++i) {
8128 void SelectionDAGBuilder::visitStackmap(
const CallInst &CI) {
8137 SDLoc DL = getCurSDLoc();
8157 cast<ConstantSDNode>(IDVal)->getZExtValue(), DL,
MVT::i64));
8160 cast<ConstantSDNode>(NBytesVal)->getZExtValue(), DL,
8177 InFlag = Chain.getValue(1);
8187 FuncInfo.MF->getFrameInfo().setHasStackMap();
8203 SDLoc dl = getCurSDLoc();
8207 if (
auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
8210 else if (
auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
8212 SDLoc(SymbolicCallee),
8213 SymbolicCallee->getValueType(0));
8217 unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
8223 "Not enough arguments provided to the patchpoint intrinsic");
8226 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
8231 populateCallLoweringInfo(CLI, CS, NumMetaOpers, NumCallArgs, Callee, ReturnTy,
8233 std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
8235 SDNode *CallEnd = Result.second.getNode();
8242 "Expected a callseq node.");
8252 cast<ConstantSDNode>(IDVal)->getZExtValue(), dl,
MVT::i64));
8255 cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl,
8264 unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
8265 NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
8274 for (
unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
8279 Ops.
append(Call->op_begin() + 2, e);
8299 if (IsAnyRegCC && HasDef) {
8304 assert(ValueVTs.
size() == 1 &&
"Expected only one return value type.");
8329 if (IsAnyRegCC && HasDef) {
8338 FuncInfo.MF->getFrameInfo().setHasPatchPoint();
8341 void SelectionDAGBuilder::visitVectorReduce(
const CallInst &I,
8342 unsigned Intrinsic) {
8348 SDLoc dl = getCurSDLoc();
8352 if (isa<FPMathOperator>(I))
8355 switch (Intrinsic) {
8426 std::pair<SDValue, SDValue>
8440 for (
size_t i = 0, e = OldRetTys.
size(); i != e; ++i) {
8441 EVT RetVT = OldRetTys[i];
8442 uint64_t
Offset = OldOffsets[i];
8446 RetTys.
append(NumRegs, RegisterVT);
8447 for (
unsigned j = 0; j != NumRegs; ++j)
8448 Offsets.
push_back(Offset + j * RegisterVTByteSZ);
8455 bool CanLowerReturn =
8460 int DemoteStackIdx = -100;
8461 if (!CanLowerReturn) {
8465 uint64_t TySize = DL.getTypeAllocSize(CLI.
RetTy);
8466 unsigned Align = DL.getPrefTypeAlignment(CLI.
RetTy);
8470 DL.getAllocaAddrSpace());
8472 DemoteStackSlot = CLI.
DAG.
getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
8474 Entry.
Node = DemoteStackSlot;
8475 Entry.
Ty = StackSlotPtrType;
8494 for (
unsigned I = 0,
E = RetTys.
size(); I !=
E; ++
I) {
8498 unsigned NumRegs = getNumRegistersForCallingConv(CLI.
RetTy->
getContext(),
8500 for (
unsigned i = 0; i != NumRegs; ++i) {
8502 MyFlags.
VT = RegisterVT;
8511 CLI.
Ins.push_back(MyFlags);
8518 if (supportSwiftError()) {
8519 for (
unsigned i = 0, e = Args.size(); i != e; ++i) {
8520 if (Args[i].IsSwiftError) {
8522 MyFlags.
VT = getPointerTy(DL);
8523 MyFlags.
ArgVT =
EVT(getPointerTy(DL));
8525 CLI.
Ins.push_back(MyFlags);
8533 for (
unsigned i = 0, e = Args.size(); i != e; ++i) {
8537 Type *FinalType = Args[i].Ty;
8538 if (Args[i].IsByVal)
8539 FinalType = cast<PointerType>(Args[i].Ty)->getElementType();
8540 bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
8542 for (
unsigned Value = 0, NumValues = ValueVTs.
size(); Value != NumValues;
8544 EVT VT = ValueVTs[Value];
8547 Args[i].Node.getResNo() + Value);
8553 unsigned OriginalAlignment = getABIAlignmentForCallingConv(ArgTy, DL);
8559 if (Args[i].IsInReg) {
8563 isa<StructType>(FinalType)) {
8566 Flags.setHvaStart();
8574 if (Args[i].IsSwiftSelf)
8575 Flags.setSwiftSelf();
8576 if (Args[i].IsSwiftError)
8577 Flags.setSwiftError();
8578 if (Args[i].IsByVal)
8580 if (Args[i].IsInAlloca) {
8581 Flags.setInAlloca();
8589 if (Args[i].IsByVal || Args[i].IsInAlloca) {
8592 Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
8595 unsigned FrameAlign;
8596 if (Args[i].Alignment)
8597 FrameAlign = Args[i].Alignment;
8599 FrameAlign = getByValTypeAlignment(ElementTy, DL);
8600 Flags.setByValAlign(FrameAlign);
8605 Flags.setInConsecutiveRegs();
8606 Flags.setOrigAlign(OriginalAlignment);
8610 unsigned NumParts = getNumRegistersForCallingConv(CLI.
RetTy->
getContext(),
8617 else if (Args[i].IsZExt)
8625 "unexpected use of 'returned'");
8638 CLI.
RetZExt == Args[i].IsZExt))
8639 Flags.setReturned();
8645 for (
unsigned j = 0; j != NumParts; ++j) {
8649 i, j*Parts[j].getValueType().getStoreSize());
8650 if (NumParts > 1 && j == 0)
8653 MyFlags.Flags.setOrigAlign(1);
8654 if (j == NumParts - 1)
8655 MyFlags.Flags.setSplitEnd();
8658 CLI.
Outs.push_back(MyFlags);
8659 CLI.
OutVals.push_back(Parts[j]);
8662 if (NeedsRegBlock && Value == NumValues - 1)
8663 CLI.
Outs[CLI.
Outs.size() - 1].Flags.setInConsecutiveRegsLast();
8668 CLI.
Chain = LowerCall(CLI, InVals);
8675 "LowerCall didn't return a valid chain!");
8677 "LowerCall emitted a return value for a tail call!");
8679 "LowerCall didn't emit the correct number of values!");
8691 for (
unsigned i = 0, e = CLI.
Ins.size(); i != e; ++i) {
8692 assert(InVals[i].getNode() &&
"LowerCall emitted a null value!");
8693 assert(
EVT(CLI.
Ins[i].VT) == InVals[i].getValueType() &&
8694 "LowerCall emitted a value with the wrong type!");
8699 if (!CanLowerReturn) {
8706 assert(PVTs.
size() == 1 &&
"Pointers should fit in one register");
8707 EVT PtrVT = PVTs[0];
8709 unsigned NumValues = RetTys.
size();
8710 ReturnValues.resize(NumValues);
8718 for (
unsigned i = 0; i < NumValues; ++i) {
8723 RetTys[i], CLI.
DL, CLI.
Chain, Add,
8725 DemoteStackIdx, Offsets[i]),
8727 ReturnValues[i] = L;
8740 unsigned CurReg = 0;
8741 for (
unsigned I = 0,
E = RetTys.
size(); I !=
E; ++
I) {
8745 unsigned NumRegs = getNumRegistersForCallingConv(CLI.
RetTy->
getContext(),
8749 NumRegs, RegisterVT, VT,
nullptr,
8757 if (ReturnValues.empty())
8763 return std::make_pair(Res, CLI.
Chain);
8782 "Copy from a reg to the same reg!");
8793 ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) ==
8794 FuncInfo.PreferredExtendType.end())
8796 : FuncInfo.PreferredExtendType[V];
8797 RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain,
nullptr, V, ExtendType);
8798 PendingExports.push_back(Chain);
8814 if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
8822 std::pair<const AllocaInst *, const StoreInst *>>;
8834 enum StaticAllocaInfo {
Unknown, Clobbered, Elidable };
8837 StaticAllocas.
reserve(NumArgs * 2);
8839 auto GetInfoIfStaticAlloca = [&](
const Value *V) -> StaticAllocaInfo * {
8844 if (!AI || !AI->isStaticAlloca() || !FuncInfo->
StaticAllocaMap.count(AI))
8847 return &Iter.first->second;
8863 if (isa<DbgInfoIntrinsic>(I))
8868 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(U))
8869 *
Info = StaticAllocaInfo::Clobbered;
8875 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(
SI->getValueOperand()))
8876 *
Info = StaticAllocaInfo::Clobbered;
8879 const Value *Dst =
SI->getPointerOperand()->stripPointerCasts();
8880 StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(Dst);
8883 const AllocaInst *AI = cast<AllocaInst>(Dst);
8891 const Value *Val =
SI->getValueOperand()->stripPointerCasts();
8893 if (!
Arg ||
Arg->hasInAllocaAttr() ||
Arg->hasByValAttr() ||
8897 ArgCopyElisionCandidates.
count(
Arg)) {
8898 *Info = StaticAllocaInfo::Clobbered;
8902 LLVM_DEBUG(
dbgs() <<
"Found argument copy elision candidate: " << *AI
8906 *Info = StaticAllocaInfo::Elidable;
8912 if (ArgCopyElisionCandidates.
size() == NumArgs)
8924 SDValue ArgVal,
bool &ArgHasUses) {
8936 auto ArgCopyIter = ArgCopyElisionCandidates.
find(&Arg);
8937 assert(ArgCopyIter != ArgCopyElisionCandidates.
end());
8938 const AllocaInst *AI = ArgCopyIter->second.first;
8939 int FixedIndex = FINode->getIndex();
8941 int OldIndex = AllocaIndex;
8945 dbgs() <<
" argument copy elision failed due to bad fixed stack " 8949 unsigned RequiredAlignment = AI->getAlignment();
8950 if (!RequiredAlignment) {
8952 AI->getAllocatedType());
8955 LLVM_DEBUG(
dbgs() <<
" argument copy elision failed: alignment of alloca " 8956 "greater than stack argument alignment (" 8957 << RequiredAlignment <<
" vs " 8965 dbgs() <<
"Eliding argument copy from " << Arg <<
" to " << *AI <<
'\n' 8966 <<
" Replacing frame index " << OldIndex <<
" with " << FixedIndex
8971 AllocaIndex = FixedIndex;
8972 ArgCopyElisionFrameIndexMap.
insert({OldIndex, FixedIndex});
8977 ElidedArgCopyInstrs.
insert(SI);
8981 for (
const Value *U : Arg.
users()) {
8989 void SelectionDAGISel::LowerArguments(
const Function &
F) {
8991 SDLoc dl = SDB->getCurSDLoc();
8995 if (!FuncInfo->CanLowerReturn) {
9007 MVT RegisterVT = TLI->getRegisterType(*DAG.
getContext(), ValueVTs[0]);
9020 for (
const Argument &
Arg : F.
args()) {
9021 unsigned ArgNo =
Arg.getArgNo();
9025 unsigned PartBase = 0;
9028 FinalType = cast<PointerType>(FinalType)->getElementType();
9029 bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
9031 for (
unsigned Value = 0, NumValues = ValueVTs.
size();
9032 Value != NumValues; ++Value) {
9033 EVT VT = ValueVTs[Value];
9040 unsigned OriginalAlignment =
9041 TLI->getABIAlignmentForCallingConv(ArgTy, DL);
9088 unsigned FrameAlign;
9089 if (
Arg.getParamAlignment())
9090 FrameAlign =
Arg.getParamAlignment();
9092 FrameAlign = TLI->getByValTypeAlignment(ElementTy, DL);
9100 if (ArgCopyElisionCandidates.
count(&
Arg))
9103 MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
9105 unsigned NumRegs = TLI->getNumRegistersForCallingConv(
9107 for (
unsigned i = 0; i != NumRegs; ++i) {
9108 ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed,
9110 if (NumRegs > 1 && i == 0)
9114 MyFlags.Flags.setOrigAlign(1);
9115 if (i == NumRegs - 1)
9116 MyFlags.Flags.setSplitEnd();
9120 if (NeedsRegBlock && Value == NumValues - 1)
9121 Ins[Ins.
size() - 1].Flags.setInConsecutiveRegsLast();
9128 SDValue NewRoot = TLI->LowerFormalArguments(
9133 "LowerFormalArguments didn't return a valid chain!");
9135 "LowerFormalArguments didn't emit the correct number of values!");
9137 for (
unsigned i = 0, e = Ins.
size(); i != e; ++i) {
9138 assert(InVals[i].getNode() &&
9139 "LowerFormalArguments emitted a null value!");
9140 assert(
EVT(Ins[i].VT) == InVals[i].getValueType() &&
9141 "LowerFormalArguments emitted a value with the wrong type!");
9146 DAG.setRoot(NewRoot);
9150 if (!FuncInfo->CanLowerReturn) {
9156 DAG.getDataLayout().getAllocaAddrSpace()),
9158 MVT VT = ValueVTs[0].getSimpleVT();
9159 MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
9167 FuncInfo->DemoteRegister = SRetReg;
9169 SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
9170 DAG.setRoot(NewRoot);
9178 for (
const Argument &
Arg : F.
args()) {
9182 unsigned NumValues = ValueVTs.
size();
9190 if (Ins[i].Flags.isCopyElisionCandidate()) {
9192 ElidedArgCopyInstrs, ArgCopyElisionCandidates,
Arg,
9193 InVals[i], ArgHasUses);
9198 bool isSwiftErrorArg =
9199 TLI->supportSwiftError() &&
9201 if (!ArgHasUses && !isSwiftErrorArg) {
9202 SDB->setUnusedArgValue(&
Arg, InVals[i]);
9206 dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
9207 FuncInfo->setArgumentFrameIndex(&
Arg, FI->getIndex());
9210 for (
unsigned Val = 0; Val != NumValues; ++Val) {
9211 EVT VT = ValueVTs[Val];
9212 MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
9214 unsigned NumParts = TLI->getNumRegistersForCallingConv(
9220 if (ArgHasUses || isSwiftErrorArg) {
9228 PartVT, VT,
nullptr,
9236 if (ArgValues.
empty())
9241 dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
9242 FuncInfo->setArgumentFrameIndex(&
Arg, FI->getIndex());
9245 SDB->getCurSDLoc());
9247 SDB->setValue(&
Arg, Res);
9256 unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0;
9260 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
9261 FuncInfo->setArgumentFrameIndex(&
Arg, FI->getIndex());
9268 FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB,
9269 FuncInfo->SwiftErrorArg, Reg);
9281 FuncInfo->ValueMap[&
Arg] =
Reg;
9286 FuncInfo->InitializeRegForValue(&
Arg);
9287 SDB->CopyToExportRegsIfNeeded(&
Arg);
9291 if (!Chains.
empty()) {
9296 DAG.setRoot(NewRoot);
9298 assert(i == InVals.
size() &&
"Argument register count mismatch!");
9303 if (!DbgDeclareInfo.empty() && !ArgCopyElisionFrameIndexMap.
empty()) {
9305 auto I = ArgCopyElisionFrameIndexMap.
find(
VI.Slot);
9306 if (I != ArgCopyElisionFrameIndexMap.
end())
9307 VI.Slot = I->second;
9312 EmitFunctionEntryCode();
9322 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(
const BasicBlock *LLVMBB) {
9331 if (!isa<PHINode>(SuccBB->
begin()))
continue;
9336 if (!SuccsHandled.
insert(SuccMBB).second)
9350 if (PN.getType()->isEmptyTy())
9354 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
9356 if (
const Constant *
C = dyn_cast<Constant>(PHIOp)) {
9357 unsigned &RegOut = ConstantsOut[
C];
9359 RegOut = FuncInfo.CreateRegs(
C->
getType());
9360 CopyValueToVirtualRegister(
C, RegOut);
9365 FuncInfo.ValueMap.find(PHIOp);
9366 if (I != FuncInfo.ValueMap.
end())
9369 assert(isa<AllocaInst>(PHIOp) &&
9370 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
9371 "Didn't codegen value into a register!??");
9372 Reg = FuncInfo.CreateRegs(PHIOp->
getType());
9373 CopyValueToVirtualRegister(PHIOp, Reg);
9382 for (
unsigned vti = 0, vte = ValueVTs.
size(); vti != vte; ++vti) {
9383 EVT VT = ValueVTs[vti];
9385 for (
unsigned i = 0, e = NumRegisters; i != e; ++i)
9386 FuncInfo.PHINodesToUpdate.push_back(
9387 std::make_pair(&*MBBI++,
Reg + i));
9388 Reg += NumRegisters;
9393 ConstantsOut.clear();
9399 SelectionDAGBuilder::StackProtectorDescriptor::
9409 MF->
insert(++BBI, SuccMBB);
9419 if (++I == FuncInfo.MF->end())
9429 void SelectionDAGBuilder::updateDAGForMaybeTailCall(
SDValue MaybeTC) {
9431 if (MaybeTC.
getNode() !=
nullptr)
9432 DAG.setRoot(MaybeTC);
9438 SelectionDAGBuilder::getJumpTableRange(
const CaseClusterVector &Clusters,
9439 unsigned First,
unsigned Last)
const {
9441 const APInt &LowCase = Clusters[First].Low->getValue();
9442 const APInt &HighCase = Clusters[Last].High->getValue();
9449 return (HighCase - LowCase).getLimitedValue((
UINT64_MAX - 1) / 100) + 1;
9452 uint64_t SelectionDAGBuilder::getJumpTableNumCases(
9454 unsigned Last)
const {
9456 assert(TotalCases[Last] >= TotalCases[First]);
9458 TotalCases[Last] - (First == 0 ? 0 : TotalCases[First - 1]);
9462 bool SelectionDAGBuilder::buildJumpTable(
const CaseClusterVector &Clusters,
9463 unsigned First,
unsigned Last,
9466 CaseCluster &JTCluster) {
9470 unsigned NumCmps = 0;
9471 std::vector<MachineBasicBlock*> Table;
9475 for (
unsigned I = First;
I <= Last; ++
I)
9478 for (
unsigned I = First;
I <= Last; ++
I) {
9480 Prob += Clusters[
I].Prob;
9481 const APInt &Low = Clusters[
I].Low->getValue();
9482 const APInt &
High = Clusters[
I].High->getValue();
9483 NumCmps += (Low ==
High) ? 1 : 2;
9486 const APInt &PreviousHigh = Clusters[
I - 1].High->getValue();
9488 uint64_t Gap = (Low - PreviousHigh).getLimitedValue() - 1;
9489 for (uint64_t J = 0; J < Gap; J++)
9490 Table.push_back(DefaultMBB);
9492 uint64_t ClusterSize = (High - Low).getLimitedValue() + 1;
9493 for (uint64_t J = 0; J < ClusterSize; ++J)
9494 Table.push_back(Clusters[
I].MBB);
9495 JTProbs[Clusters[
I].MBB] += Clusters[
I].Prob;
9499 unsigned NumDests = JTProbs.
size();
9501 NumDests, NumCmps, Clusters[First].Low->getValue(),
9502 Clusters[Last].High->getValue(), DAG.getDataLayout())) {
9516 if (Done.
count(Succ))
9518 addSuccessorWithProb(JumpTableMBB, Succ, JTProbs[Succ]);
9524 ->createJumpTableIndex(Table);
9528 JumpTableHeader JTH(Clusters[First].Low->getValue(),
9531 JTCases.emplace_back(std::move(JTH), std::move(JT));
9533 JTCluster = CaseCluster::jumpTable(Clusters[First].Low, Clusters[Last].
High,
9534 JTCases.size() - 1, Prob);
9538 void SelectionDAGBuilder::findJumpTables(CaseClusterVector &Clusters,
9543 assert(!Clusters.empty());
9544 for (CaseCluster &
C : Clusters)
9546 for (
unsigned i = 1, e = Clusters.size(); i < e; ++i)
9547 assert(Clusters[i - 1].
High->getValue().slt(Clusters[i].Low->getValue()));
9554 const int64_t
N = Clusters.size();
9556 const unsigned SmallNumberOfEntries = MinJumpTableEntries / 2;
9558 if (N < 2 || N < MinJumpTableEntries)
9563 for (
unsigned i = 0; i <
N; ++i) {
9564 const APInt &
Hi = Clusters[i].High->getValue();
9565 const APInt &
Lo = Clusters[i].Low->getValue();
9566 TotalCases[i] = (Hi -
Lo).getLimitedValue() + 1;
9568 TotalCases[i] += TotalCases[i - 1];
9572 uint64_t Range = getJumpTableRange(Clusters,0, N - 1);
9573 uint64_t NumCases = getJumpTableNumCases(TotalCases, 0, N - 1);
9575 assert(Range >= NumCases);
9577 CaseCluster JTCluster;
9578 if (buildJumpTable(Clusters, 0, N - 1, SI, DefaultMBB, JTCluster)) {
9579 Clusters[0] = JTCluster;
9606 enum PartitionScores :
unsigned {
9614 MinPartitions[N - 1] = 1;
9615 LastElement[N - 1] = N - 1;
9616 PartitionsScore[N - 1] = PartitionScores::SingleCase;
9619 for (int64_t i = N - 2; i >= 0; i--) {
9622 MinPartitions[i] = MinPartitions[i + 1] + 1;
9624 PartitionsScore[i] = PartitionsScore[i + 1] + PartitionScores::SingleCase;
9627 for (int64_t j = N - 1; j > i; j--) {
9629 uint64_t Range = getJumpTableRange(Clusters, i, j);
9630 uint64_t NumCases = getJumpTableNumCases(TotalCases, i, j);
9632 assert(Range >= NumCases);
9634 unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
9635 unsigned Score = j == N - 1 ? 0 : PartitionsScore[j + 1];
9636 int64_t NumEntries = j - i + 1;
9638 if (NumEntries == 1)
9639 Score += PartitionScores::SingleCase;
9640 else if (NumEntries <= SmallNumberOfEntries)
9641 Score += PartitionScores::FewCases;
9642 else if (NumEntries >= MinJumpTableEntries)
9643 Score += PartitionScores::Table;
9647 if (NumPartitions < MinPartitions[i] ||
9648 (NumPartitions == MinPartitions[i] && Score > PartitionsScore[i])) {
9649 MinPartitions[i] = NumPartitions;
9651 PartitionsScore[i] = Score;
9658 unsigned DstIndex = 0;
9659 for (
unsigned First = 0, Last; First <
N; First = Last + 1) {
9660 Last = LastElement[First];
9662 assert(DstIndex <= First);
9663 unsigned NumClusters = Last - First + 1;
9665 CaseCluster JTCluster;
9666 if (NumClusters >= MinJumpTableEntries &&
9667 buildJumpTable(Clusters, First, Last, SI, DefaultMBB, JTCluster)) {
9668 Clusters[DstIndex++] = JTCluster;
9670 for (
unsigned I = First;
I <= Last; ++
I)
9671 std::memmove(&Clusters[DstIndex++], &Clusters[
I],
sizeof(Clusters[I]));
9674 Clusters.resize(DstIndex);
9677 bool SelectionDAGBuilder::buildBitTests(CaseClusterVector &Clusters,
9678 unsigned First,
unsigned Last,
9680 CaseCluster &BTCluster) {
9685 BitVector Dests(FuncInfo.MF->getNumBlockIDs());
9686 unsigned NumCmps = 0;
9687 for (int64_t
I = First;
I <= Last; ++
I) {
9690 NumCmps += (Clusters[
I].Low == Clusters[
I].High) ? 1 : 2;
9692 unsigned NumDests = Dests.count();
9694 APInt Low = Clusters[First].Low->getValue();
9695 APInt High = Clusters[Last].High->getValue();
9700 if (!TLI.isSuitableForBitTests(NumDests, NumCmps, Low, High, DL))
9706 const int BitWidth = TLI.getPointerTy(DL).getSizeInBits();
9707 assert(TLI.rangeFitsInWord(Low, High, DL) &&
9708 "Case range must fit in bit mask!");
9712 bool ContiguousRange =
true;
9713 for (int64_t
I = First + 1;
I <= Last; ++
I) {
9714 if (Clusters[
I].Low->getValue() != Clusters[
I - 1].High->getValue() + 1) {
9715 ContiguousRange =
false;
9725 ContiguousRange =
false;
9728 CmpRange = High - Low;
9733 for (
unsigned i = First; i <= Last; ++i) {
9736 for (j = 0; j < CBV.size(); ++j)
9737 if (CBV[j].BB == Clusters[i].MBB)
9739 if (j == CBV.size())
9742 CaseBits *CB = &CBV[j];
9745 uint64_t
Lo = (Clusters[i].Low->getValue() - LowBound).getZExtValue();
9746 uint64_t
Hi = (Clusters[i].High->getValue() - LowBound).getZExtValue();
9747 assert(Hi >= Lo && Hi < 64 &&
"Invalid bit case!");
9748 CB->Mask |= (-1ULL >> (63 - (Hi -
Lo))) << Lo;
9749 CB->Bits += Hi - Lo + 1;
9750 CB->ExtraProb += Clusters[i].Prob;
9751 TotalProb += Clusters[i].Prob;
9755 llvm::sort(CBV, [](
const CaseBits &a,
const CaseBits &b) {
9757 if (a.ExtraProb != b.ExtraProb)
9758 return a.ExtraProb > b.ExtraProb;
9759 if (a.Bits != b.Bits)
9760 return a.Bits > b.Bits;
9761 return a.Mask < b.Mask;
9764 for (
auto &CB : CBV) {
9766 FuncInfo.MF->CreateMachineBasicBlock(SI->
getParent());
9767 BTI.
push_back(BitTestCase(CB.Mask, BitTestBB, CB.BB, CB.ExtraProb));
9769 BitTestCases.emplace_back(std::move(LowBound), std::move(CmpRange),
9771 ContiguousRange,
nullptr,
nullptr, std::move(BTI),
9774 BTCluster = CaseCluster::bitTests(Clusters[First].Low, Clusters[Last].High,
9775 BitTestCases.size() - 1, TotalProb);
9779 void SelectionDAGBuilder::findBitTestClusters(CaseClusterVector &Clusters,
9786 assert(!Clusters.empty());
9787 assert(Clusters[0].
Kind == CC_Range || Clusters[0].
Kind == CC_JumpTable);
9788 for (
const CaseCluster &
C : Clusters)
9789 assert(
C.Kind == CC_Range ||
C.Kind == CC_JumpTable);
9790 for (
unsigned i = 1; i < Clusters.size(); ++i)
9791 assert(Clusters[i-1].
High->getValue().slt(Clusters[i].Low->getValue()));
9807 const int64_t
N = Clusters.size();
9817 MinPartitions[N - 1] = 1;
9818 LastElement[N - 1] = N - 1;
9821 for (int64_t i = N - 2; i >= 0; --i) {
9824 MinPartitions[i] = MinPartitions[i + 1] + 1;
9829 for (int64_t j = std::min(N - 1, i + BitWidth - 1); j > i; --j) {
9834 Clusters[j].High->getValue(), DL))
9839 bool RangesOnly =
true;
9840 BitVector Dests(FuncInfo.MF->getNumBlockIDs());
9841 for (int64_t k = i; k <= j; k++) {
9842 if (Clusters[k].
Kind != CC_Range) {
9846 Dests.set(Clusters[k].MBB->
getNumber());
9848 if (!RangesOnly || Dests.count() > 3)
9852 unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
9853 if (NumPartitions < MinPartitions[i]) {
9855 MinPartitions[i] = NumPartitions;
9862 unsigned DstIndex = 0;
9863 for (
unsigned First = 0, Last; First <
N; First = Last + 1) {
9864 Last = LastElement[First];
9866 assert(DstIndex <= First);
9868 CaseCluster BitTestCluster;
9869 if (buildBitTests(Clusters, First, Last, SI, BitTestCluster)) {
9870 Clusters[DstIndex++] = BitTestCluster;
9872 size_t NumClusters = Last - First + 1;
9874 sizeof(Clusters[0]) * NumClusters);
9875 DstIndex += NumClusters;
9878 Clusters.resize(DstIndex);
9881 void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem
W,
Value *Cond,
9887 if (++BBI != FuncInfo.MF->end())
9890 unsigned Size = W.LastCluster - W.FirstCluster + 1;
9894 if (Size == 2 && W.MBB == SwitchMBB) {
9902 CaseCluster &
Small = *W.FirstCluster;
9903 CaseCluster &Big = *W.LastCluster;
9905 if (Small.Low == Small.High && Big.Low == Big.High &&
9906 Small.MBB == Big.MBB) {
9907 const APInt &SmallValue = Small.Low->getValue();
9908 const APInt &BigValue = Big.Low->getValue();
9911 APInt CommonBit = BigValue ^ SmallValue;
9913 SDValue CondLHS = getValue(Cond);
9915 SDLoc DL = getCurSDLoc();
9918 DAG.getConstant(CommonBit, DL, VT));
9920 DL,
MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
9926 addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
9928 addSuccessorWithProb(
9929 SwitchMBB, DefaultMBB,
9933 addSuccessorWithProb(SwitchMBB, DefaultMBB);
9938 DAG.getBasicBlock(Small.MBB));
9941 DAG.getBasicBlock(DefaultMBB));
9943 DAG.setRoot(BrCond);
9954 llvm::sort(W.FirstCluster, W.LastCluster + 1,
9955 [](
const CaseCluster &a,
const CaseCluster &b) {
9956 return a.Prob != b.Prob ?
9958 a.Low->getValue().slt(b.Low->getValue());
9963 for (CaseClusterIt
I = W.LastCluster;
I > W.FirstCluster; ) {
9965 if (
I->Prob > W.LastCluster->Prob)
9967 if (
I->Kind == CC_Range &&
I->MBB == NextMBB) {
9977 for (CaseClusterIt
I = W.FirstCluster;
I <= W.LastCluster; ++
I)
9978 UnhandledProbs +=
I->Prob;
9981 for (CaseClusterIt
I = W.FirstCluster,
E = W.LastCluster;
I <=
E; ++
I) {
9983 if (
I == W.LastCluster) {
9985 Fallthrough = DefaultMBB;
9988 CurMF->
insert(BBI, Fallthrough);
9990 ExportFromCurrentBlock(Cond);
9992 UnhandledProbs -=
I->Prob;
9995 case CC_JumpTable: {
9997 JumpTableHeader *JTH = &JTCases[
I->JTCasesIndex].first;
10002 CurMF->
insert(BBI, JumpMBB);
10004 auto JumpProb =
I->Prob;
10005 auto FallthroughProb = UnhandledProbs;
10013 if (*SI == DefaultMBB) {
10014 JumpProb += DefaultProb / 2;
10015 FallthroughProb -= DefaultProb / 2;
10022 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
10023 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
10028 JTH->HeaderBB = CurMBB;
10029 JT->Default = Fallthrough;
10032 if (CurMBB == SwitchMBB) {
10033 visitJumpTableHeader(*JT, *JTH, SwitchMBB);
10034 JTH->Emitted =
true;
10038 case CC_BitTests: {
10040 BitTestBlock *BTB = &BitTestCases[
I->BTCasesIndex];
10043 for (BitTestCase &BTC : BTB->Cases)
10044 CurMF->
insert(BBI, BTC.ThisBB);
10047 BTB->Parent = CurMBB;
10048 BTB->Default = Fallthrough;
10050 BTB->DefaultProb = UnhandledProbs;
10054 if (!BTB->ContiguousRange) {
10055 BTB->Prob += DefaultProb / 2;
10056 BTB->DefaultProb -= DefaultProb / 2;
10060 if (CurMBB == SwitchMBB) {
10061 visitBitTestHeader(*BTB, SwitchMBB);
10062 BTB->Emitted =
true;
10067 const Value *RHS, *LHS, *MHS;
10069 if (
I->Low ==
I->High) {
10084 CaseBlock CB(CC, LHS, RHS, MHS,
I->MBB, Fallthrough, CurMBB,
10085 getCurSDLoc(),
I->Prob, UnhandledProbs);
10087 if (CurMBB == SwitchMBB)
10088 visitSwitchCase(CB, SwitchMBB);
10090 SwitchCases.push_back(CB);
10095 CurMBB = Fallthrough;
10099 unsigned SelectionDAGBuilder::caseClusterRank(
const CaseCluster &CC,
10100 CaseClusterIt First,
10101 CaseClusterIt Last) {
10102 return std::count_if(First, Last + 1, [&](
const CaseCluster &
X) {
10103 if (X.Prob != CC.Prob)
10104 return X.Prob > CC.Prob;
10107 return X.Low->getValue().slt(CC.Low->getValue());
10111 void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
10112 const SwitchWorkListItem &W,
10115 assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
10116 "Clusters not sorted?");
10118 assert(W.LastCluster - W.FirstCluster + 1 >= 2 &&
"Too small to split!");
10123 CaseClusterIt LastLeft = W.FirstCluster;
10124 CaseClusterIt FirstRight = W.LastCluster;
10125 auto LeftProb = LastLeft->Prob + W.DefaultProb / 2;
10126 auto RightProb = FirstRight->Prob + W.DefaultProb / 2;
10133 while (LastLeft + 1 < FirstRight) {
10134 if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
10135 LeftProb += (++LastLeft)->Prob;
10137 RightProb += (--FirstRight)->Prob;
10147 unsigned NumLeft = LastLeft - W.FirstCluster + 1;
10148 unsigned NumRight = W.LastCluster - FirstRight + 1;
10150 if (std::min(NumLeft, NumRight) < 3 &&
std::max(NumLeft, NumRight) > 3) {
10154 if (NumLeft < NumRight) {
10156 CaseCluster &CC = *FirstRight;
10157 unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
10158 unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
10159 if (LeftSideRank <= RightSideRank) {
10166 assert(NumRight < NumLeft);
10168 CaseCluster &CC = *LastLeft;
10169 unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
10170 unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
10171 if (RightSideRank <= LeftSideRank) {
10182 assert(LastLeft + 1 == FirstRight);
10183 assert(LastLeft >= W.FirstCluster);
10184 assert(FirstRight <= W.LastCluster);
10188 CaseClusterIt PivotCluster = FirstRight;
10189 assert(PivotCluster > W.FirstCluster);
10190 assert(PivotCluster <= W.LastCluster);
10192 CaseClusterIt FirstLeft = W.FirstCluster;
10193 CaseClusterIt LastRight = W.LastCluster;
10205 if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
10206 FirstLeft->Low == W.GE &&
10207 (FirstLeft->High->getValue() + 1LL) == Pivot->
getValue()) {
10208 LeftMBB = FirstLeft->MBB;
10210 LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
10211 FuncInfo.MF->
insert(BBI, LeftMBB);
10212 WorkList.push_back(
10213 {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
10215 ExportFromCurrentBlock(Cond);
10222 if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
10223 W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
10224 RightMBB = FirstRight->MBB;
10226 RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
10227 FuncInfo.MF->
insert(BBI, RightMBB);
10228 WorkList.push_back(
10229 {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
10231 ExportFromCurrentBlock(Cond);
10235 CaseBlock CB(
ISD::SETLT, Cond, Pivot,
nullptr, LeftMBB, RightMBB, W.MBB,
10236 getCurSDLoc(), LeftProb, RightProb);
10238 if (W.MBB == SwitchMBB)
10239 visitSwitchCase(CB, SwitchMBB);
10241 SwitchCases.push_back(CB);
10264 const SwitchInst &SI, CaseClusterVector &Clusters,
10274 unsigned PeeledCaseIndex = 0;
10275 bool SwitchPeeled =
false;
10277 CaseCluster &CC = Clusters[
Index];
10278 if (CC.Prob < TopCaseProb)
10280 TopCaseProb = CC.Prob;
10281 PeeledCaseIndex =
Index;
10282 SwitchPeeled =
true;
10287 LLVM_DEBUG(
dbgs() <<
"Peeled one top case in switch stmt, prob: " 10288 << TopCaseProb <<
"\n");
10294 FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->
getBasicBlock());
10295 FuncInfo.MF->
insert(BBI, PeeledSwitchMBB);
10298 auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
10299 SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
10300 nullptr,
nullptr, TopCaseProb.
getCompl()};
10301 lowerWorkItem(W, SI.
getCondition(), SwitchMBB, PeeledSwitchMBB);
10303 Clusters.erase(PeeledCaseIt);
10304 for (CaseCluster &CC : Clusters) {
10306 dbgs() <<
"Scale the probablity for one cluster, before scaling: " 10307 << CC.Prob <<
"\n");
10311 PeeledCaseProb = TopCaseProb;
10312 return PeeledSwitchMBB;
10315 void SelectionDAGBuilder::visitSwitch(
const SwitchInst &SI) {
10318 CaseClusterVector Clusters;
10320 for (
auto I : SI.
cases()) {
10326 Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
10334 sortAndRangeify(Clusters);
10339 bool UnreachableDefault =
10341 if (UnreachableDefault && !Clusters.empty()) {
10343 unsigned MaxPop = 0;
10345 for (
auto I : SI.
cases()) {
10347 if (++Popularity[BB] > MaxPop) {
10348 MaxPop = Popularity[BB];
10353 assert(MaxPop > 0 && MaxBB);
10354 DefaultMBB = FuncInfo.MBBMap[MaxBB];
10358 CaseClusterVector New;
10359 New.reserve(Clusters.size());
10360 for (CaseCluster &CC : Clusters) {
10361 if (CC.MBB != DefaultMBB)
10364 Clusters = std::move(New);
10371 peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
10375 if (Clusters.empty()) {
10376 assert(PeeledSwitchMBB == SwitchMBB);
10378 if (DefaultMBB != NextBlock(SwitchMBB)) {
10380 getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
10385 findJumpTables(Clusters, &SI, DefaultMBB);
10386 findBitTestClusters(Clusters, &SI);
10389 dbgs() <<
"Case clusters: ";
10390 for (
const CaseCluster &
C : Clusters) {
10391 if (
C.Kind == CC_JumpTable)
10393 if (
C.Kind == CC_BitTests)
10397 if (
C.Low !=
C.High) {
10406 assert(!Clusters.empty());
10407 SwitchWorkList WorkList;
10408 CaseClusterIt First = Clusters.begin();
10409 CaseClusterIt Last = Clusters.end() - 1;
10410 auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
10416 WorkList.push_back(
10417 {PeeledSwitchMBB, First, Last,
nullptr,
nullptr, DefaultProb});
10419 while (!WorkList.empty()) {
10420 SwitchWorkListItem W = WorkList.back();
10421 WorkList.pop_back();
10422 unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
10427 splitWorkItem(WorkList, W, SI.
getCondition(), SwitchMBB);
10431 lowerWorkItem(W, SI.
getCondition(), SwitchMBB, DefaultMBB);
ANNOTATION_LABEL - Represents a mid basic block label used by annotations.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
bool onlyReadsMemory() const
Determine if the function does not access or only reads memory.
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Return a value (possibly void), from a function.
SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned char TargetFlags=0)
Value * getValueOperand()
unsigned getNumCases() const
Return the number of 'cases' in this switch instruction, excluding the default case.
static MVT getIntegerVT(unsigned BitWidth)
void setByValAlign(unsigned A)
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
A parsed version of the target data layout string in and methods for querying it. ...
static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val, const SDLoc &DL, EVT PartVT)
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
static ConstantInt * getFalse(LLVMContext &Context)
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
static void findArgumentCopyElisionCandidates(const DataLayout &DL, FunctionLoweringInfo *FuncInfo, ArgCopyElisionMapTy &ArgCopyElisionCandidates)
Scan the entry block of the function in FuncInfo for arguments that look like copies into a local all...
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
EVT getValueType() const
Return the ValueType of the referenced return value.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
bool isInteger() const
Return true if this is an integer or a vector integer type.
This class is the base class for the comparison instructions.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrnlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, SDValue MaxLength, MachinePointerInfo SrcPtrInfo) const
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
unsigned getIndexSizeInBits(unsigned AS) const
Size in bits of index used for address calculation in getelementptr.
C - The default llvm calling convention, compatible with C.
Constrained versions of libm-equivalent floating point intrinsics.
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant, which is required to be operand #1) half of the integer or float value specified as operand #0.
*p = old <signed v ? old : v
bool usesUnderscoreLongJmp() const
Determine if we should use _longjmp or longjmp to implement llvm.longjmp.
bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
iterator_range< CaseIt > cases()
Iteration adapter for range-for loops.
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
This class represents an incoming formal argument to a Function.
BranchProbability getCompl() const
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location, SDISelAsmOperandInfo &OpInfo, SelectionDAG &DAG)
Get a direct memory input to behave well as an indirect operand.
unsigned arg_size() const
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd)...
CallingConv::ID getCallingConv() const
Get the calling convention of the call.
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
This represents the llvm.dbg.label instruction.
Atomic ordering constants.
DELETED_NODE - This is an illegal value that is used to catch errors.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, MachinePointerInfo Op1PtrInfo, MachinePointerInfo Op2PtrInfo) const
Emit target-specific code that performs a strcmp, in cases where that is faster than a libcall...
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR (an vector value) starting with the ...
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
This class represents lattice values for constants.
void setCopyElisionCandidate()
void ExportFromCurrentBlock(const Value *V)
ExportFromCurrentBlock - If this condition isn't known to be exported from the current basic block...
bool isAtomic() const
Return true if this instruction has an AtomicOrdering of unordered or higher.
static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI)
expandExp2 - Lower an exp2 intrinsic.
void setCallsUnwindInit(bool b)
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0...
iterator begin() const
begin/end - Return all of the registers in this class.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
A Module instance is used to store all the information related to an LLVM module. ...
bool isSized(SmallPtrSetImpl< Type *> *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
BasicBlock * getSuccessor(unsigned Idx) const
Return the specified successor. This instruction must be a terminator.
An instruction for ordering other memory operations.
SDValue getAtomicMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, unsigned DstAlign, SDValue Src, unsigned SrcAlign, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
void CopyValueToVirtualRegister(const Value *V, unsigned Reg)
an instruction that atomically checks whether a specified value is in a memory location, and, if it is, stores a new value there.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
const Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
bool isVector() const
Return true if this is a vector value type.
unsigned getIROrder() const
Return the node ordering.
static unsigned LimitFloatPrecision
LimitFloatPrecision - Generate low-precision inline sequences for some float libcalls (6...
bool usesUnderscoreSetJmp() const
Determine if we should use _setjmp or setjmp to implement llvm.setjmp.
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
SelectionDAGBuilder - This is the common target-independent lowering implementation that is parameter...
A specialization of it's base class for read only access to a gc.statepoint.
bool isABIMangled() const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
void push_back(const T &Elt)
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
bool hasOpaqueSPAdjustment() const
Returns true if the function contains opaque dynamic stack adjustments.
MCSymbol * getOrCreateFrameAllocSymbol(StringRef FuncName, unsigned Idx)
Gets a symbol that will be defined to the final stack offset of a local variable after codegen...
static unsigned getFlagWord(unsigned Kind, unsigned NumOps)
unsigned getReg() const
getReg - Returns the register number.
static void GetRegistersForValue(SelectionDAG &DAG, const SDLoc &DL, SDISelAsmOperandInfo &OpInfo, SDISelAsmOperandInfo &RefOpInfo)
GetRegistersForValue - Assign registers (virtual or physical) for the specified operand.
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
bool slt(const APInt &RHS) const
Signed less than comparison.
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain...
This class represents a function call, abstracting a target machine's calling convention.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static bool getUniformBase(const Value *&Ptr, SDValue &Base, SDValue &Index, SDValue &Scale, SelectionDAGBuilder *SDB)
Value * getCondition() const
unsigned getSourceAlignment() const
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space...
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this store instruction.
gep_type_iterator gep_type_end(const User *GEP)
SDValue getBasicBlock(MachineBasicBlock *MBB)
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
const std::string & getAsmString() const
*p = old <unsigned v ? old : v
unsigned getVectorNumElements() const
This class represents the atomic memcpy intrinsic i.e.
void copyFMF(const FPMathOperator &FPMO)
Propagate the fast-math-flags from an IR FPMathOperator.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
void visitJumpTableHeader(JumpTable &JT, JumpTableHeader &JTH, MachineBasicBlock *SwitchBB)
visitJumpTableHeader - This function emits necessary code to produce index in the JumpTable from swit...
Function Alias Analysis Results
bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps, const APInt &Low, const APInt &High, const DataLayout &DL) const
Return true if lowering to a bit test is suitable for a set of case clusters which contains NumDests ...
*p = old >unsigned v ? old : v
This instruction constructs a fixed permutation of two input vectors.
Optional< CallingConv::ID > CallConv
Records if this value needs to be treated in an ABI dependant manner, different to normal type legali...
LLVMContext & getContext() const
All values hold a context through their type.
bool isTerminator() const
static LocationSize precise(uint64_t Value)
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
void addSuccessorWithoutProb(MachineBasicBlock *Succ)
Add Succ as a successor of this MachineBasicBlock.
auto count_if(R &&Range, UnaryPredicate P) -> typename std::iterator_traits< decltype(adl_begin(Range))>::difference_type
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI)
expandLog2 - Lower a log2 intrinsic.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly...
void setInConsecutiveRegs()
BasicBlock * getSuccessor(unsigned i) const
Value * getNewValOperand()
Constrained versions of the binary floating point operators.
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
Return the register class that should be used for the specified value type.
unsigned const TargetRegisterInfo * TRI
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, ImmutableCallSite CS) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
bool isInteger() const
Return true if this is an integer or a vector integer type.
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, Optional< CallingConv::ID > CC=None, Optional< ISD::NodeType > AssertOp=None)
getCopyFromParts - Create a value that contains the specified legal parts combined into the value the...
MachineModuleInfo & getMMI() const
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS)
Helper function to make it easier to build Select's if you just have operands and don't want to check...
uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew=0)
Returns the next integer (mod 2**64) that is greater than or equal to Value and is a multiple of Alig...
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
An instruction for reading from memory.
[US]{MIN/MAX} - Binary minimum or maximum or signed or unsigned integers.
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
SDNode * getNode() const
get the SDNode which holds the desired result
Value * getCondition() const
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned char TargetFlags=0)
BasicBlock * getUnwindDest() const
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
bool isVectorTy() const
True if this is an instance of VectorType.
static BranchProbability getOne()
This is the common base class for constrained floating point intrinsics.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
BasicBlock * getSuccessor() const
unsigned getTypeIDFor(const GlobalValue *TI)
Return the type id for the specified typeinfo. This is function wide.
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
bool isOperationLegalOrCustom(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
Value * getLength() const
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V, const Twine &ErrMsg)
void DeleteNode(SDNode *N)
Remove the specified node from the system.
SDValue getValueImpl(const Value *V)
getValueImpl - Helper function for getValue and getNonRegisterValue.
SDValue getConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offs=0, bool isT=false, unsigned char TargetFlags=0)
*p = old >signed v ? old : v
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls, even if TrapUnreachable is true.
static unsigned getUnderlyingArgReg(const SDValue &N)
unsigned getValueSizeInBits() const
Returns the size of the value in bits.
EntryToken - This is the marker used to indicate the start of a region.
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, MachineFunction &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
void AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter)
Add a dbg_value SDNode.
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it...
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, Instruction::BinaryOps Opc, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To, unsigned Num)
Like ReplaceAllUsesOfValueWith, but for multiple values at once.
void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
EmitBranchForMergedCondition - Helper method for FindMergedConditions.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool isWrappedSet() const
Return true if this set wraps around the top of the range.
BasicBlock * getSuccessor(unsigned i) const
SDValue getExternalSymbol(const char *Sym, EVT VT)
static MachineOperand CreateReg(unsigned Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
void computeUsesVAFloatArgument(const CallInst &I, MachineModuleInfo &MMI)
Determine if any floating-point values are being passed to this variadic function, and set the MachineModuleInfo's usesVAFloatArgument flag if so.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemchr(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Src, SDValue Char, SDValue Length, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memchr, in cases where that is faster than a libcall...
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
SDValue getAtomicMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, unsigned DstAlign, SDValue Value, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo)
void setCurrentCallSite(unsigned Site)
Set the call site currently being processed.
MachineJumpTableInfo * getOrCreateJumpTableInfo(unsigned JTEntryKind)
getOrCreateJumpTableInfo - Get the JumpTableInfo for this function, if it does already exist...
SDValue getValue(const Value *V)
getValue - Return an SDValue for the given Value.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
iterator begin()
Instruction iterator methods.
bool hasSideEffects() const
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic...
Value * getArgOperand(unsigned i) const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
void setNoSignedWrap(bool b)
void visitSPDescriptorParent(StackProtectorDescriptor &SPD, MachineBasicBlock *ParentBB)
Codegen a new tail for a stack protector check ParentMBB which has had its tail spliced into a stack ...
SDValue getRoot()
Return the current virtual root of the Selection DAG, flushing any PendingLoad items.
The address of a basic block.
bool match(Val *V, const Pattern &P)
unsigned getAllocaAddrSpace() const
uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew=0)
Returns the largest uint64_t less than or equal to Value and is Skew mod Align.
CLEANUPRET - Represents a return from a cleanup block funclet.
bool isVolatile() const
Return true if this is a load from a volatile memory location.
A description of a memory reference used in the backend.
const CallInst * getTerminatingDeoptimizeCall() const
Returns the call instruction calling @llvm.experimental.deoptimize prior to the terminating return in...
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
static unsigned getFlagWordForRegClass(unsigned InputFlag, unsigned RC)
getFlagWordForRegClass - Augment an existing flag word returned by getFlagWord with the required regi...
PCMARKER - This corresponds to the pcmarker intrinsic.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
void visitSwitchCase(CaseBlock &CB, MachineBasicBlock *SwitchBB)
visitSwitchCase - Emits the necessary code to represent a single node in the binary search tree resul...
void setVectorReduction(bool b)
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
const HexagonInstrInfo * TII
unsigned getAlignment() const
Return the alignment of the memory that is being allocated by the instruction.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
unsigned getNumSuccessors() const
static bool hasOnlySelectUsers(const Value *Cond)
Shift and rotation operations.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
std::size_t countTrailingOnes(T Value, ZeroBehavior ZB=ZB_Width)
Count the number of ones from the least significant bit to the first zero bit.
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
Class to represent struct types.
LLVMContext & getContext() const
Get the global data context.
A Use represents the edge between a Value definition and its users.
DILabel * getLabel() const
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
void visitJumpTable(JumpTable &JT)
visitJumpTable - Emit JumpTable node in the current MBB
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
BinOp getOperation() const
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g.
SDValue getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO)
CallLoweringInfo & setChain(SDValue InChain)
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
RESULT = SMULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same wi...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
static unsigned findMatchingInlineAsmOperand(unsigned OperandNo, const std::vector< SDValue > &AsmNodeOperands)
FLT_ROUNDS_ - Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest 2 Round to ...
virtual bool useLoadStackGuardNode() const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
SmallVector< EVT, 4 > ValueVTs
The value types of the values, which may not be legal, and may need be promoted or synthesized from o...
bool IsPostTypeLegalization
This file contains the simple types necessary to represent the attributes associated with functions a...
TypeID
Definitions of all of the base types for the Type system.
NaN behavior not applicable.
unsigned getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
InstrTy * getInstruction() const
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA)...
The memory access is dereferenceable (i.e., doesn't trap).
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt) For double-word atomic operations: ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amtLo, amtHi) ValLo, ValHi, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amtLo, amtHi) These correspond to the atomicrmw instruction.
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first...
static bool isRegDefEarlyClobberKind(unsigned Flag)
static unsigned convertMemFlagWordToMatchingFlagWord(unsigned InputFlag)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
const DataLayout & getDataLayout() const
unsigned getDestAlignment() const
void setByValSize(unsigned S)
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
void setFunctionContextIndex(int I)
unsigned getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
uint64_t getNumElements() const
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
unsigned getID() const
Return the register class ID number.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
zlib-gnu style compression
This file implements a class to represent arbitrary precision integral constant values and operations...
INLINEASM - Represents an inline asm block.
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
unsigned getLiveInPhysReg(unsigned VReg) const
getLiveInPhysReg - If VReg is a live-in virtual register, return the corresponding live-in physical r...
SmallVector< ISD::InputArg, 32 > Ins
AtomicOrdering
Atomic ordering for LLVM's memory model.
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, Optional< CallingConv::ID > CC)
getCopyFromPartsVector - Create a value that contains the specified legal parts combined into the val...
STACKSAVE - STACKSAVE has one operand, an input chain.
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
unsigned getSizeInBits() const
void assign(size_type NumElts, const T &Elt)
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch, catchpad/ret, and cleanuppad/ret.
virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const
This callback is used to prepare for a volatile or atomic load.
auto reverse(ContainerTy &&C, typename std::enable_if< has_rbegin< ContainerTy >::value >::type *=nullptr) -> decltype(make_range(C.rbegin(), C.rend()))
ValTy * getCalledValue() const
Return the pointer to function that is being called.
void emitError(unsigned LocCookie, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
Value * getPointerOperand()
unsigned getActiveBits() const
Compute the number of active bits in the value.
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo, const SDLoc &dl, SDValue &Chain, SDValue *Flag, const Value *V=nullptr) const
Emit a series of CopyFromReg nodes that copies from this value and returns the result as a ValueVTs v...
unsigned getScalarSizeInBits() const
static BranchProbability scaleCaseProbality(BranchProbability CaseProb, BranchProbability PeeledCaseProb)
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Class to represent function types.
A constant value that is initialized with an expression using other constant values.
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL, SDValue &Chain)
Create a LOAD_STACK_GUARD node, and let it carry the target specific global variable if there exists ...
FastMathFlags getFastMathFlags() const
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
bool hasOperandBundlesOtherThan(ArrayRef< uint32_t > IDs) const
Return true if this operand bundle user contains operand bundles with tags other than those specified...
Type * getType() const
All values are typed, get the type of this value.
CATCHPAD - Represents a catchpad instruction.
void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl, SDValue &Chain, SDValue *Flag, const Value *V=nullptr, ISD::NodeType PreferredExtendType=ISD::ANY_EXTEND) const
Emit a series of CopyToReg nodes that copies the specified value into the registers specified by this...
MachineFunction & getMachineFunction() const
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
SDValue getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO)
OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN) The target initializes the dispatch table here...
const LiveOutInfo * GetLiveOutRegInfo(unsigned Reg)
GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the register is a PHI destinat...
static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)
ConstantDataSequential - A vector or array constant whose element type is a simple 1/2/4/8-byte integ...
virtual bool supportSwiftError() const
Return true if the target supports swifterror attribute.
ArchType getArch() const
getArch - Get the parsed architecture type of this triple.
SDValue getTargetFrameIndex(int FI, EVT VT)
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
const TargetMachine & getTarget() const
const T & getValue() const LLVM_LVALUE_FUNCTION
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
const MCContext & getContext() const
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset...
This contains information for each constraint that we are lowering.
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Simple integer binary arithmetic operators.
SDValue getNonRegisterValue(const Value *V)
getNonRegisterValue - Return an SDValue for the given Value, but don't look in FuncInfo.ValueMap for a virtual register.
This instruction compares its operands according to the predicate given to the constructor.
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE bool empty() const
empty - Check if the string is empty.
SmallVector< ISD::OutputArg, 32 > Outs
void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site)
Map the begin label for a call site.
static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo, SDISelAsmOperandInfo &MatchingOpInfo, SelectionDAG &DAG)
Make sure that the output operand OpInfo and its corresponding input operand MatchingOpInfo have comp...
This class represents a no-op cast from one type to another.
unsigned getCurrentCallSite()
Get the call site currently being processed, if any.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
const APInt & getValue() const
Return the constant as an APInt value reference.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool ShouldEmitAsBranches(const std::vector< CaseBlock > &Cases)
If the set of cases should be emitted as a series of branches, return true.
static Optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I, SDValue Op)
static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const TargetLowering &TLI)
visitPow - Lower a pow intrinsic.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
AttributeList getAttributes() const
Return the attribute list for this Function.
An instruction for storing to memory.
SDDbgValue * getVRegDbgValue(DIVariable *Var, DIExpression *Expr, unsigned VReg, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a VReg SDDbgValue node.
SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Mask, EVT MemVT, MachineMemOperand *MMO, bool IsTruncating=false, bool IsCompressing=false)
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
void init(GCFunctionInfo *gfi, AliasAnalysis *AA, const TargetLibraryInfo *li)
virtual const TargetInstrInfo * getInstrInfo() const
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred)
getFCmpCondCode - Return the ISD condition code corresponding to the given LLVM IR floating-point con...
void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI, ImmutableCallSite CS, unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy, bool IsPatchPoint)
Populate a CallLoweringInfo (into CLI) based on the properties of the call being lowered.
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
ConstraintPrefix Type
Type - The basic type of the constraint: input/output/clobber.
virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const
static bool isRegDefKind(unsigned Flag)
ArrayRef< SDUse > ops() const
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements. ...
SDValue getLabelNode(unsigned Opcode, const SDLoc &dl, SDValue Root, MCSymbol *Label)
void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
bool findValue(const Value *V) const
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
void setStackProtectorIndex(int I)
void setOrigAlign(unsigned A)
static cl::opt< unsigned > SwitchPeelThreshold("switch-peel-threshold", cl::Hidden, cl::init(66), cl::desc("Set the case probability threshold for peeling the case from a " "switch statement. A value greater than 100 will void this " "optimization"))
bool isTypeLegalForClass(const TargetRegisterClass &RC, MVT T) const
Return true if the given TargetRegisterClass has the ValueType T.
static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl, SelectionDAG &DAG)
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
amdgpu Simplify well known AMD library false Value * Callee
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
APInt getUnsignedMin() const
Return the smallest unsigned value contained in the ConstantRange.
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
unsigned getNumSuccessors() const
Return the number of successors that this instruction has.
Value * getOperand(unsigned i) const
Analysis containing CSE Info
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Class to represent pointers.
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
void append(const RegsForValue &RHS)
Add the specified values to this one.
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
bool isJumpExpensive() const
Return true if Flow Control is an expensive operation that should be avoided.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the specified, possibly variable...
TargetInstrInfo - Interface to description of machine instruction set.
This corresponds to the llvm.lifetime.
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
iterator find(const_arg_type_t< KeyT > Val)
bool doesNotAccessMemory() const
Determine if the function does not access memory.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
static Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
void setAttributes(ImmutableCallSite *CS, unsigned ArgIdx)
Set CallLoweringInfo attribute flags based on a call instruction and called function attributes...
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal=false)
Checks whether the given location points to constant memory, or if OrLocal is true whether it points ...
bool isVoidTy() const
Return true if this is 'void'.
The memory access is volatile.
const BasicBlock & getEntryBlock() const
constexpr uint64_t MinAlign(uint64_t A, uint64_t B)
A and B are either alignments or offsets.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs ...
bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value...
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void getAAMetadata(AAMDNodes &N, bool Merge=false) const
Fills the AAMDNodes structure with AA metadata from this instruction.
void setIsEHScopeEntry(bool V=true)
Indicates if this is the entry block of an EH scope, i.e., the block that used to have a catchpa...
SDDbgValue * getConstantDbgValue(DIVariable *Var, DIExpression *Expr, const Value *C, const DebugLoc &DL, unsigned O)
Creates a constant SDDbgValue node.
unsigned getObjectAlignment(int ObjectIdx) const
Return the alignment of the specified stack object.
SDValue getMDNode(const MDNode *MD)
Return an MDNodeSDNode which holds an MDNode.
initializer< Ty > init(const Ty &Val)
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
Type * getReturnType() const
Returns the type of the ret val.
std::pair< SDValue, SDValue > lowerInvokable(TargetLowering::CallLoweringInfo &CLI, const BasicBlock *EHPadBB=nullptr)
static MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
SmallVector< SDValue, 4 > InVals
std::vector< AsmOperandInfo > AsmOperandInfoVector
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Minimum number of bits that can be specified.
These reductions are non-strict, and have a single vector operand.
MVT getSimpleValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the MVT corresponding to this LLVM type. See getValueType.
virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases, uint64_t Range) const
Return true if lowering to a jump table is suitable for a set of case clusters which may contain NumC...
The landingpad instruction holds all of the information necessary to generate correct exception handl...
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Control flow instructions. These all have token chains.
MCSymbol * createTempSymbol(bool CanBeUnnamed=true)
Create and return a new assembler temporary symbol with a unique but unspecified name.
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
* if(!EatIfPresent(lltok::kw_thread_local)) return false
ParseOptionalThreadLocal := /*empty.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
bool isStrictlyPositive() const
Determine if this APInt Value is strictly positive (greater than zero).
unsigned const MachineRegisterInfo * MRI
std::size_t countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most significant bit, stopping at the first 1...
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
Value * getCalledValue() const
LLVM Basic Block Representation.
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
The instances of the Type class are immutable: once they are created, they are never changed...
const SelectionDAGTargetInfo & getSelectionDAGInfo() const
This is an important class for using LLVM in a threaded context.
void RemoveStackObject(int ObjectIdx)
Remove or mark dead a statically sized stack object.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
Simple binary floating point operators.
Conditional or Unconditional Branch instruction.
bool is_splat(R &&Range)
Wrapper function around std::equal to detect if all elements in a container are same.
bool isNoBuiltin() const
Return true if the call should not be treated as a call to a builtin.
static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call...
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
unsigned getScalarSizeInBits() const
This function has undefined behavior.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This is an important base class in LLVM.
bool hasEHFunclets() const
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE...
MSVC calling convention that passes vectors and vector aggregates in SSE registers.
Resume the propagation of an exception.
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator begin()
const SDValue & getOperand(unsigned Num) const
Value * getCompareOperand()
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL...
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
const Instruction & front() const
Indirect Branch Instruction.
ConstantFP - Floating Point Values [float, double].
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
void dropDanglingDebugInfo(const DILocalVariable *Variable, const DIExpression *Expr)
If we have dangling debug info that describes Variable, or an overlapping part of variable considerin...
int getStackProtectorIndex() const
Return the index for the stack protector object.
virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val, const SDLoc &DL) const
BasicBlock * getDefaultDest() const
DIExpression * getExpression() const
APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
static unsigned getNumOperandRegisters(unsigned Flag)
getNumOperandRegisters - Extract the number of registers field from the inline asm operand flag...
A udiv or sdiv instruction, which can be marked as "exact", indicating that no bits are destroyed...
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
bool isFast() const
'Fast' means all bits are set.
This file declares a class to represent arbitrary precision floating point values and provide a varie...
static Type * getVoidTy(LLVMContext &C)
INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.
virtual bool areJTsAllowed(const Function *Fn) const
Return true if lowering to a jump table is allowed.
const Instruction & back() const
bool optForSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
This instruction compares its operands according to the predicate given to the constructor.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl...
void print(raw_ostream &O, bool IsForDebug=false) const
Implement operator<< on Value.
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
TRAP - Trapping instruction.
const Triple & getTargetTriple() const
Value * getPointerOperand()
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Base class for variables.
DEBUGTRAP - Trap intended to get the attention of a debugger.
virtual MVT getFenceOperandTy(const DataLayout &DL) const
Return the type for operands of fence.
SDNode * getGluedNode() const
If this node has a glue operand, return the node to which the glue operand points.
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align=0, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, unsigned Size=0)
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
The memory access is non-temporal.
bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type. ...
std::pair< NoneType, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Constant Vector Declarations.
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const
This callback is invoked for operations that are unsupported by the target, which are registered to u...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
static unsigned getFlagWordForMem(unsigned InputFlag, unsigned Constraint)
Augment an existing flag word returned by getFlagWord with the constraint code for a memory constrain...
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::LoadExtType, bool IsExpanding=false)
Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, const DataLayout &DL)
ConstantFoldLoadFromConstPtr - Return the value that a load from C would produce if it is constant an...
auto remove_if(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range))
Provide wrappers to std::remove_if which take ranges instead of having to pass begin/end explicitly...
const MachineInstrBuilder & addFrameIndex(int Idx) const
Bit counting operators with an undefined result for zero inputs.
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
bool supportsUnalignedAtomics() const
Whether the target supports unaligned atomic operations.
Targets can subclass this to parameterize the SelectionDAG lowering and instruction selection process...
virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const
Return the type that should be used to zero or sign extend a zeroext/signext integer return value...
succ_iterator succ_begin()
static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS, SelectionDAG &DAG)
ExpandPowI - Expand a llvm.powi intrinsic.
std::vector< ArgListEntry > ArgListTy
static bool isMemKind(unsigned Flag)
detail::zippy< detail::zip_first, T, U, Args... > zip_first(T &&t, U &&u, Args &&... args)
zip iterator that, for the sake of efficiency, assumes the first iteratee to be the shortest...
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs, and aliases.
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this cmpxchg instruction.
Attribute getAttribute(unsigned Index, Attribute::AttrKind Kind) const
Return the attribute object that exists at the given index.
bool isAlignStack() const
BasicBlock * getSuccessor(unsigned i) const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
uint32_t getElementSizeInBytes() const
const Value * getArraySize() const
Get the number of elements allocated.
This structure contains all information that is necessary for lowering calls.
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
void setHasLocalEscape(bool V)
This class contains a discriminated union of information about pointers in memory operands...
virtual bool useStackGuardXorFP() const
If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...
unsigned getNumOperands() const
Return the number of values used by this operation.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
SDLoc getCurSDLoc() const
SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo, unsigned Alignment, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SyncScope::ID SSID)
Gets a node for an atomic cmpxchg op.
static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI)
expandExp - Lower an exp intrinsic.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM)
Test if the given instruction is in a position to be optimized with a tail-call.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands...
Triple - Helper class for working with autoconf configuration names.
BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
void sort(IteratorTy Start, IteratorTy End)
TargetIntrinsicInfo - Interface to description of machine instruction set.
The memory access writes data.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
static bool isVectorReductionOp(const User *I)
Checks if the given instruction performs a vector reduction, in which case we have the freedom to alt...
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
SDDbgValue * getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N, unsigned R, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a SDDbgValue node.
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
bool isEmptySet() const
Return true if this set contains no members.
void CopyToExportRegsIfNeeded(const Value *V)
CopyToExportRegsIfNeeded - If the given value has virtual registers created for it, emit nodes to copy the value into the virtual registers.
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags...
Given one NaN input, returns the non-NaN.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type...
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
Representation for a specific memory location.
virtual MVT hasFastEqualityCompare(unsigned NumBits) const
Return the preferred operand type if the target has a quick way to compare integer values of the give...
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
TokenFactor - This node takes multiple tokens as input and produces a single token result...
void visitSPDescriptorFailure(StackProtectorDescriptor &SPD)
Codegen the failure basic block for a stack protector check.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
static const fltSemantics & IEEEsingle() LLVM_READNONE
const TargetLowering & getTargetLoweringInfo() const
Iterator for intrusive lists based on ilist_node.
unsigned getNumOperands() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
void setNoUnsignedWrap(bool b)
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl)
GetSignificand - Get the significand and build it into a floating-point number with exponent of 1: ...
unsigned countPopulation(T Value)
Count the number of set bits in a value.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the generic address space (address sp...
This is the shared class of boolean and integer constants.
auto size(R &&Range, typename std::enable_if< std::is_same< typename std::iterator_traits< decltype(Range.begin())>::iterator_category, std::random_access_iterator_tag >::value, void >::type *=nullptr) -> decltype(std::distance(Range.begin(), Range.end()))
Get the size of a range.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcpy(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dest, SDValue Src, MachinePointerInfo DestPtrInfo, MachinePointerInfo SrcPtrInfo, bool isStpcpy) const
Emit target-specific code that performs a strcpy or stpcpy, in cases where that is faster than a libc...
static uint32_t getDenominator()
BlockVerifier::State From
SmallVector< unsigned, 4 > RegCount
This list holds the number of registers for each value.
Returns platform specific canonical encoding of a floating point number.
ValTy * getArgument(unsigned ArgNo) const
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, unsigned Align=1, bool *=nullptr) const
Determine if the target supports unaligned memory accesses.
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it...
EVT getVectorElementType() const
Given a vector type, return the type of each element.
virtual const char * getClearCacheBuiltinName() const
Return the builtin name for the __builtin___clear_cache intrinsic Default is to invoke the clear cach...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These methods get and set the calling convention of this functio...
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDDbgValue * getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr, unsigned FI, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a FrameIndex SDDbgValue node.
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
RegsForValue - This struct represents the registers (physical or virtual) that a particular set of va...
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Utility class for floating point operations which can have information about relaxed accuracy require...
Module.h This file contains the declarations for the Module class.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
Provides information about what library functions are available for the current target.
void clear()
Clear out the current SelectionDAG and the associated state and prepare this SelectionDAGBuilder obje...
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
This class represents a range of values.
static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt, const SDLoc &dl)
getF32Constant - Get 32-bit floating point constant.
unsigned getABITypeAlignment(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
BRCOND - Conditional branch.
bool isAggregateType() const
Return true if the type is an aggregate type.
bool isOSLinux() const
Tests whether the OS is Linux.
An SDNode that represents everything that will be needed to construct a MachineInstr.
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Byte Swap and Counting operators.
void setCallsEHReturn(bool b)
void visit(const Instruction &I)
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool rangeFitsInWord(const APInt &Low, const APInt &High, const DataLayout &DL) const
Check whether the range [Low,High] fits in a machine word.
SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling...
static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel)
isOnlyUsedInEntryBlock - If the specified argument is only used in the entry block, return true.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
uint64_t scale(uint64_t Num) const
Scale a large integer.
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0)
Append the extracted elements from Start to Count out of the vector Op in Args.
void addIPToStateRange(const InvokeInst *II, MCSymbol *InvokeBegin, MCSymbol *InvokeEnd)
static Optional< CallingConv::ID > getABIRegCopyCC(const Value *V)
Represents one node in the SelectionDAG.
Value * getRawSource() const
Return the arguments to the instruction.
static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI)
expandLog - Lower a log intrinsic.
void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last)
UpdateSplitBlock - When an MBB was split during scheduling, update the references that need to refer ...
AttributeList getAttributes() const
Return the parameter attributes for this call.
unsigned getEVTAlignment(EVT MemoryVT) const
Compute the default alignment value for the given type.
static ConstantInt * getTrue(LLVMContext &Context)
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC)
getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats, return the equivalent code if w...
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
const Function & getFunction() const
Return the LLVM function that this machine code represents.
unsigned logBase2() const
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
unsigned getVectorNumElements() const
Class to represent vector types.
SmallVector< std::pair< unsigned, unsigned >, 4 > getRegsAndSizes() const
Return a list of registers and their sizes.
static const unsigned MaxParallelChains
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT...
Target - Wrapper for Target specific information.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
Class for arbitrary precision integers.
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1', drop it.
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
bool fragmentsOverlap(const DIExpression *Other) const
Check if fragments overlap between this DIExpression and Other.
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
void visitBitTestCase(BitTestBlock &BB, MachineBasicBlock *NextMBB, BranchProbability BranchProbToNext, unsigned Reg, BitTestCase &B, MachineBasicBlock *SwitchBB)
visitBitTestCase - this function produces one "bit test"
BranchProbabilityInfo * BPI
Select(COND, TRUEVAL, FALSEVAL).
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
iterator_range< user_iterator > users()
ZERO_EXTEND - Used for integer types, zeroing the new bits.
void AddDbgLabel(SDDbgLabel *DB)
Add a dbg_label SDNode.
ANY_EXTEND - Used for integer types. The high bits are undefined.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, MachinePointerInfo SrcPtrInfo) const
virtual bool isFMAFasterThanFMulAndFAdd(EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
const MCPhysReg * iterator
SDValue getBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, bool isTarget=false, unsigned char TargetFlags=0)
void setIsEHFuncletEntry(bool V=true)
Indicates if this is the entry block of an EH funclet.
virtual Value * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handling, returns that function; otherwise returns nullptr.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, MachinePointerInfo Op1PtrInfo, MachinePointerInfo Op2PtrInfo) const
Emit target-specific code that performs a memcmp, in cases where that is faster than a libcall...
CATCHRET - Represents a return from a catch block funclet.
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, Optional< CallingConv::ID > CallConv=None, ISD::NodeType ExtendKind=ISD::ANY_EXTEND)
getCopyToParts - Create a series of nodes that contain the specified value split into legal parts...
amdgpu Simplify well known AMD library false Value Value * Arg
SmallVector< SDValue, 8 > PendingLoads
PendingLoads - Loads are not emitted to the program immediately.
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
static void tryToElideArgumentCopy(FunctionLoweringInfo *FuncInfo, SmallVectorImpl< SDValue > &Chains, DenseMap< int, int > &ArgCopyElisionFrameIndexMap, SmallPtrSetImpl< const Instruction *> &ElidedArgCopyInstrs, ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg, SDValue ArgVal, bool &ArgHasUses)
Try to elide argument copies from memory into a local alloca.
SDDbgLabel * getDbgLabel(DILabel *Label, const DebugLoc &DL, unsigned O)
Creates a SDDbgLabel node.
uint64_t getTypeSizeInBits(Type *Ty) const
Size examples:
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca...
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
CallLoweringInfo & setTailCall(bool Value=true)
uint64_t getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment padding.
bool isDereferenceablePointer(const Value *V, const DataLayout &DL, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr)
Return true if this is always a dereferenceable pointer.
BR_JT - Jumptable branch.
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
Predicate getPredicate() const
Return the predicate for this instruction.
unsigned succ_size(const Instruction *I)
This file provides various utilities for inspecting and working with the control flow graph in LLVM IR.
static bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
These are IR-level optimization flags that may be propagated to SDNodes.
Represents a use of a SDNode.
CallLoweringInfo & setConvergent(bool Value=true)
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator end()
SmallVector< SDValue, 32 > OutVals
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
bool hasGC() const
hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm to use during code generation.
bool isVolatile() const
Return true if this is a store to a volatile memory location.
Analysis providing branch probability information.
unsigned getNumArgOperands() const
bool isVector() const
Return true if this is a vector value type.
Bitwise operators - logical and, logical or, logical xor.
Holds the information from a dbg_label node through SDISel.
static SDValue GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI, const SDLoc &dl)
GetExponent - Get the exponent:
pointer data()
Return a pointer to the vector's buffer, even if empty().
const Function * getParent() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
unsigned getAlignment() const
Return the alignment of the access that is being performed.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, bool isSigned, const SDLoc &dl, bool doesNotReturn=false, bool isReturnValueUsed=true) const
Returns a pair of (return value, chain).
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
LLVM_NODISCARD bool empty() const
StringRef getValueAsString() const
Return the attribute's value as a string.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this store instruction.
This represents the llvm.dbg.value instruction.
bool isTokenTy() const
Return true if this is 'token'.
This file provides utility analysis objects describing memory locations.
Value * getPointerOperand()
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
Establish a view to a call site for examination.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation.
const Function * getParent() const
Return the enclosing method, or null if none.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
void push_back(MachineInstr *MI)
bool onlyReadsMemory(unsigned OpNo) const
const TargetSubtargetInfo & getSubtarget() const
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
static void addStackMapLiveVars(ImmutableCallSite CS, unsigned StartIdx, const SDLoc &DL, SmallVectorImpl< SDValue > &Ops, SelectionDAGBuilder &Builder)
Add a stack map intrinsic call's live variable operands to a stackmap or patchpoint target node's ope...
bool optForMinSize() const
Optimize this function for minimum size (-Oz).
static Constant * getZeroValueForNegation(Type *Ty)
Floating point negation must be implemented with f(x) = -0.0 - x.
virtual const TargetFrameLowering * getFrameLowering() const
The memory access always returns the same value (or traps).
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
unsigned InferPtrAlignment(SDValue Ptr) const
Infer alignment of a load / store address.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
Value * getCatchSwitchParentPad() const
Get the parentPad of this catchret's catchpad's catchswitch.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
DILocalVariable * getVariable() const
unsigned getOpcode() const
SDValue getValue(unsigned R) const
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nullptr.
X86_INTR - x86 hardware interrupt context.
virtual MachineMemOperand::Flags getMMOFlags(const Instruction &I) const
This callback is used to inspect load/store instructions and add target-specific MachineMemOperand flags.
bool isUnconditional() const
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block...
unsigned getNumRegisters(LLVMContext &Context, EVT VT) const
Return the number of registers that this ValueType will eventually require.
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.setjmp intrinsic.
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
unsigned getAlignment() const
Return the alignment of the access that is being performed.
SDValue getAtomicMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, unsigned DstAlign, SDValue Src, unsigned SrcAlign, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
LLVM_NODISCARD bool empty() const
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
bool isStrictFP() const
Determine if the call requires strict floating point semantics.
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
Type * getType() const
Return the type of the instruction that generated this call site.
SDValue getCopyFromRegs(const Value *V, Type *Ty)
getCopyFromRegs - If there was virtual register allocated for the value V emit CopyFromReg of the spe...
bool isReg() const
isReg - Tests if this is a MO_Register operand.
OutputIt transform(R &&Range, OutputIt d_first, UnaryPredicate P)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere...
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
bool isMinValue() const
Determine if this is the smallest unsigned value.
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin...
AsmDialect getDialect() const
void setIsImmutableObjectIndex(int ObjectIdx, bool IsImmutable)
Marks the immutability of an object.
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, const Value *PtrVal, unsigned Alignment, AtomicOrdering Ordering, SyncScope::ID SSID)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands...
bool isStatepoint(ImmutableCallSite CS)
SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue SV, unsigned Align)
VAArg produces a result and token chain, and takes a pointer and a source value as input...
void GetUnderlyingObjects(Value *V, SmallVectorImpl< Value *> &Objects, const DataLayout &DL, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to GetUnderlyingObject except that it can look through phi and select instruct...
virtual unsigned getMinimumJumpTableEntries() const
Return lower limit for number of blocks in a jump table.
void LowerCallTo(ImmutableCallSite CS, SDValue Callee, bool IsTailCall, const BasicBlock *EHPadBB=nullptr)
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
SmallVector< MVT, 4 > RegVTs
The value types of the registers.
SDValue getJumpTable(int JTI, EVT VT, bool isTarget=false, unsigned char TargetFlags=0)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
const BasicBlock & front() const
void insert(iterator MBBI, MachineBasicBlock *MBB)
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
Given one NaN input, returns the NaN.
SmallVector< int, 16 > getShuffleMask() const
static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT, SelectionDAGBuilder &Builder)
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
static void findUnwindDestinations(FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl< std::pair< MachineBasicBlock *, BranchProbability >> &UnwindDests)
When an invoke or a cleanupret unwinds to the next EH pad, there are many places it could ultimately ...
Module * getParent()
Get the module that this global value is contained inside of...
LLVM Value Representation.
Constant * getPersonalityFn() const
Get the personality function associated with this function.
FMA - Perform a * b + c with no intermediate rounding step.
SDValue getRegister(unsigned Reg, EVT VT)
unsigned getResNo() const
get the index which selects a specific result in the SDNode
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
uint64_t getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type...
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
FunctionType * getFunctionType() const
static VectorType * get(Type *ElementType, unsigned NumElements)
This static method is the primary way to construct an VectorType.
void setDebugLoc(DebugLoc dl)
Set source location info.
unsigned getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
SDValue getValueType(EVT)
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
PREFETCH - This corresponds to a prefetch intrinsic.
static const Function * getParent(const Value *V)
void AddInlineAsmOperands(unsigned Code, bool HasMatching, unsigned MatchingIdx, const SDLoc &dl, SelectionDAG &DAG, std::vector< SDValue > &Ops) const
Add this value to the specified inlineasm node operand list.
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through th...
SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label)
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Type * getElementType() const
SDValue getSrcValue(const Value *v)
Construct a node to track a Value* through the backend.
DenseMap< const BasicBlock *, MachineBasicBlock * > MBBMap
MBBMap - A mapping from LLVM basic blocks to their machine code entry.
SDValue getControlRoot()
Similar to getRoot, but instead of flushing all the PendingLoad items, flush all the PendingExports items.
bool hasNoNaNs() const
Determine whether the no-NaNs flag is set.
bool hasOneUse() const
Return true if there is exactly one user of this value.
Convenience struct for specifying and reasoning about fast-math flags.
StringRef - Represent a constant reference to a string, i.e.
Garbage collection metadata for a single function.
void clearDanglingDebugInfo()
Clear the dangling debug information map.
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
static cl::opt< unsigned, true > LimitFPPrecision("limit-float-precision", cl::desc("Generate low-precision inline sequences " "for some float libcalls"), cl::location(LimitFloatPrecision), cl::Hidden, cl::init(0))
const Instruction * getFirstNonPHIOrDbg() const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic...
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
static BranchProbability getZero()
bool isConvergent() const
Determine if the call is convergent.
bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty...
static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI)
expandLog10 - Lower a log10 intrinsic.
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
static bool isVolatile(Instruction *Inst)
static APInt getNullValue(unsigned numBits)
Get the '0' value.
void addCodeViewAnnotation(MCSymbol *Label, MDNode *MD)
Record annotations associated with a particular label.
const SDValue & getOperand(unsigned i) const
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
const Value * stripInBoundsConstantOffsets() const
Strip off pointer casts and all-constant inbounds GEPs.
TRUNCATE - Completely drop the high bits.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Op, int64_t Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object...
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
for(unsigned i=Desc.getNumOperands(), e=OldMI.getNumOperands();i !=e;++i)
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand *> NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB)
Holds the information from a dbg_value node through SDISel.
virtual SDValue LowerReturn(SDValue, CallingConv::ID, bool, const SmallVectorImpl< ISD::OutputArg > &, const SmallVectorImpl< SDValue > &, const SDLoc &, SelectionDAG &) const
This hook must be implemented to lower outgoing return values, described by the Outs array...
Perform various unary floating-point operations inspired by libm.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
SmallVector< unsigned, 4 > Regs
This list holds the registers assigned to the values.
Value * getPointerOperand()
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall, MachinePointerInfo DstPtrInfo)
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
Value * getRawDest() const
static MachineOperand CreateFI(int Idx)
uint32_t getNumerator() const
void visitBitTestHeader(BitTestBlock &B, MachineBasicBlock *SwitchBB)
visitBitTestHeader - This function emits necessary code to produce value suitable for "bit tests" ...
LocationClass< Ty > location(Ty &L)
LLVMContext * getContext() const
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
Type * getElementType() const
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
std::vector< MachineBasicBlock * >::iterator succ_iterator
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL, bool LegalTypes=true) const
static BranchProbability getBranchProbStackProtector(bool IsLikely)
unsigned createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified register class.
void resolveDanglingDebugInfo(const Value *V, SDValue Val)
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute >> Attrs)
Create an AttributeList with the specified parameters in it.
BinaryOp_match< ValTy, cst_pred_ty< is_all_ones >, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
iterator_range< arg_iterator > args()
bool isStructTy() const
True if this is an instance of StructType.
static unsigned getFlagWordForMatchingOp(unsigned InputFlag, unsigned MatchedOperandNo)
getFlagWordForMatchingOp - Augment an existing flag word returned by getFlagWord with information ind...
bool isArrayTy() const
True if this is an instance of ArrayType.
static Optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
This class contains meta information specific to a module.
This file describes how to lower LLVM code to machine code.
const BasicBlock * getParent() const
an instruction to allocate memory on the stack
vt_iterator legalclasstypes_begin(const TargetRegisterClass &RC) const
Loop over all of the value types that can be represented by values in the given register class...
This instruction inserts a struct field of array element value into an aggregate value.
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned char TargetFlags=0)
gep_type_iterator gep_type_begin(const User *GEP)
static bool InBlock(const Value *V, const BasicBlock *BB)
This class is used to represent ISD::LOAD nodes.
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary...
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, Optional< CallingConv::ID > CallConv)
getCopyToPartsVector - Create a series of nodes that contain the specified value split into legal par...