#define DEBUG_TYPE "ppc-lowering"

// ...
    cl::desc("disable setting the node scheduling preference to ILP on PPC"),
    cl::Hidden);

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");
  bool isPPC64 = Subtarget.isPPC64();
  // ...
  if (!Subtarget.hasSPE()) {
    // ...
  }
  // ...
  for (MVT VT : ScalarIntVTs) {
    // ...
  }
  // ...
  if (isPPC64 || Subtarget.hasFPCVT()) {
    // ...
                      isPPC64 ? MVT::i64 : MVT::i32);
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      unsigned EltAlign = 0;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}
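// Worked example: for a byval struct such as { vector float V; char C; },
// the vector member drives MaxAlign up to 16, and the early
// MaxAlign == MaxMaxAlign checks stop the recursion as soon as the cap is
// reached, so large aggregates are not scanned further than necessary.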
  return Subtarget.hasSPE();
// ...
  case PPCISD::FP_TO_UINT_IN_VSR:  return "PPCISD::FP_TO_UINT_IN_VSR";
  case PPCISD::FP_TO_SINT_IN_VSR:  return "PPCISD::FP_TO_SINT_IN_VSR";
// ...
  case PPCISD::ST_VSR_SCAL_INT:    return "PPCISD::ST_VSR_SCAL_INT";
    return CFP->getValueAPF().isZero();
  // ...
    if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
      return CFP->getValueAPF().isZero();

/// isConstantOrUndef - Return true if Op is undef or matches Val.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}
  if (ShuffleKind == 0) {
    // ...
    for (unsigned i = 0; i != 16; ++i)
      // ...
  } else if (ShuffleKind == 2) {
    // ...
    for (unsigned i = 0; i != 16; ++i)
      // ...
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      // ...
  }
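// vpkuhum packs each halfword to a byte by keeping its low-order byte: on
// big-endian that is every odd byte of the two concatenated inputs
// (<1,3,5,...,31>), on little-endian every even byte, hence the i*2+1 vs.
// i*2 comparisons. ShuffleKind distinguishes a normal two-input shuffle
// (0, BE only), a unary shuffle (1), and a swapped shuffle (2, LE only).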
  if (ShuffleKind == 0) {
    // ...
    for (unsigned i = 0; i != 16; i += 2)
      // ...
  } else if (ShuffleKind == 2) {
    // ...
    for (unsigned i = 0; i != 16; i += 2)
      // ...
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
      // ...
  }
  if (ShuffleKind == 0) {
    // ...
    for (unsigned i = 0; i != 16; i += 4)
      // ...
  } else if (ShuffleKind == 2) {
    // ...
    for (unsigned i = 0; i != 16; i += 4)
      // ...
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
      // ...
  }
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  // ...
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");
  // ...
  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units.
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit.
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    // ...
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
  // ...
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    // ...
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
                     unsigned RHSStartValue) {
  // ...
  for (unsigned i = 0; i < 2; ++i)
    for (unsigned j = 0; j < 4; ++j)
      if (!isConstantOrUndef(/* ... */,
                             i*RHSStartValue+j+IndexOffset) ||
          !isConstantOrUndef(/* ... */,
                             i*RHSStartValue+j+IndexOffset+8))
        return false;
  return true;
}
    unsigned indexOffset = CheckEven ? 4 : 0;
    if (ShuffleKind == 1) // unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, indexOffset, 16);
  // ...
    unsigned indexOffset = CheckEven ? 0 : 4;
    if (ShuffleKind == 1) // unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, indexOffset, 16);
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16)
    return -1;  // all undef.
  // ...
  if (ShiftAmt < i)
    return -1;
  // ...
  if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
    // ...
    for (++i; i != 16; ++i)
      // ...
  } else if (ShuffleKind == 1) {
    // ...
    for (++i; i != 16; ++i)
      // ...
  }
  // ...
  if (isLE)
    ShiftAmt = 16 - ShiftAmt;
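// vsldoi extracts 16 consecutive bytes at a byte offset from the
// concatenation of the two inputs, so a matching mask looks like
// <N, N+1, ..., N+15>, possibly with undefs. The scan above locates the
// first defined element, derives the candidate shift amount from it, and
// the loops verify that the remaining elements continue the run.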
  assert(N->getValueType(0) == MVT::v16i8 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));
  // ...
  if (ElementBase >= 16)
    return false;
  // ...
  for (unsigned i = 1; i != EltSize; ++i)
    // ...
  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    // ...
    for (unsigned j = 0; j != EltSize; ++j)
      // ...
  }
  assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
         "Unexpected element width.");
  assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");

  unsigned NumOfElem = 16 / Width;
  unsigned MaskVal[16];
  for (unsigned i = 0; i < NumOfElem; ++i) {
    // ...
    if ((StepLen == 1) && (MaskVal[0] % Width)) {
      return false;
    } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
      return false;
    }

    for (unsigned int j = 1; j < Width; ++j) {
      // ...
      if (MaskVal[j] != MaskVal[j-1] + StepLen) {
        return false;
      }
    }
  }
bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                          unsigned &InsertAtByte, bool &Swap, bool IsLE) {
  // ...
  unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
  unsigned BigEndianShifts[] = { 3, 0, 1, 2 };
  // ...
  if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
      (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
    InsertAtByte = IsLE ? 12 : 0;
    // ...
  }
  // ...
  if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
      (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
    InsertAtByte = IsLE ? 8 : 4;
    // ...
  }
  // ...
  if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
      (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
    InsertAtByte = IsLE ? 4 : 8;
    // ...
  }
  // ...
  if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
      (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
    ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
    InsertAtByte = IsLE ? 0 : 12;
    // ...
  }
  // ...
  unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
  if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
    InsertAtByte = IsLE ? 12 : 0;
    // ...
  }
  if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
    InsertAtByte = IsLE ? 8 : 4;
    // ...
  }
  if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
    InsertAtByte = IsLE ? 4 : 8;
    // ...
  }
  if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
    InsertAtByte = IsLE ? 0 : 12;
    // ...
  }
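// Example of the first clause: mask <4,1,2,3> takes word 0 of the second
// operand and words 1-3 of the first, i.e. an insert of one word into the
// other vector, which xxinsertw can do after the source word is rotated
// into place (ShiftElts). The M0 < 4 / M0 > 3 pairs in each clause are
// the two operand orders of the same pattern.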
bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                               bool &Swap, bool IsLE) {
  // ...
    assert(M0 < 4 && "Indexing into an undef vector?");
    if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
      return false;
    // ...
    ShiftElts = IsLE ? (4 - M0) % 4 : M0;
  // ...
  if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
    return false;
  // ...
  if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
    // ...
    ShiftElts = (8 - M0) % 8;
  } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
    // ...
    ShiftElts = (4 - M0) % 4;
  }
  // ...
  if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
    // ...
  } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
    // ...
  }
  for (int i = 0; i < 16; i += Width)
    // ...
bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
                                bool &Swap, bool IsLE) {
  // ...
  assert(((M0 | M1) < 4) && "A mask element out of bounds?");
  // ...
  if ((M0 | M1) < 2) {
    DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
    // ...
  }
  // ...
    if (M0 > 1 && M1 < 2) {
      // ...
    } else if (M0 < 2 && M1 > 1) {
      // ...
    }
    // ...
    DM = (((~M1) & 1) << 1) + ((~M0) & 1);
  // ...
    if (M0 < 2 && M1 > 1) {
      // ...
    } else if (M0 > 1 && M1 < 2) {
      // ...
    }
    // ...
    DM = (M0 << 1) + (M1 & 1);
  return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
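// Endian adjustment for the splat immediate: a splat of element k maps to
// vsplt[bhw] k on big-endian, but little-endian numbers lanes from the
// other end, so the immediate becomes (16/EltSize - 1) - k, with the mask
// element first scaled down to EltSize units.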
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;
    // ...
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
    // ...
      if (!UniquedVals[i&(Multiple-1)].getNode())
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        // ...
    // ...
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (!UniquedVals[i].getNode())
        continue;
      // ...
    }
    // ...
      if (!UniquedVals[Multiple-1].getNode())
        // ...
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
    // ...
      if (!UniquedVals[Multiple-1].getNode())
        // ...
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
  unsigned ValSizeInBytes = EltSize;
  // ...
    Value = CN->getZExtValue();
  // ...
    assert(CN->getValueType(0) == MVT::f32 &&
           "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  // ...
  if (ValSizeInBytes < ByteSize)
    return SDValue();
  // ...
  if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
    // ...
  if (MaskVal == 0)
    return SDValue();
  // ...
  if (SignExtend32<5>(MaskVal) == MaskVal)
    // ...
  for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 4)
    return -1;  // all undef.
  // ...
  if (ShiftAmt < i)
    return -1;
  // ...
  for (++i; i != 4; ++i)
    // ...
  if (!isa<ConstantSDNode>(N))
    return false;

  Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
// ...
    if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
                                            unsigned Alignment) const {
  // ...
      (!Alignment || (imm % Alignment) == 0)) {
  // ...
         "Cannot handle constant offsets yet!");
  // ...
      (!Alignment || (imm % Alignment) == 0)) {
  // ...
          dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
  // ...
                       CN->getValueType(0));
  // ...
    if ((CN->getValueType(0) == MVT::i32 ||
         (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
        (!Alignment || (CN->getZExtValue() % Alignment) == 0)) {
      int Addr = (int)CN->getZExtValue();
  EVT MemVT = LD->getMemoryVT();
  // ...
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlignment();
  // ...
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Alignment = ST->getAlignment();
  // ...
  if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
    // ...
  SDValue Val = cast<StoreSDNode>(N)->getValue();
  // ...
      isa<ConstantSDNode>(Offset))
                               unsigned &HiOpFlags, unsigned &LoOpFlags,
  // ...
  if (GV->hasHiddenVisibility()) {
  unsigned MOHiFlag, MOLoFlag;
  // ...
  unsigned MOHiFlag, MOLoFlag;
  // ...
  unsigned MOHiFlag, MOLoFlag;
  // ...
  bool is64bit = Subtarget.isPPC64();
  // ...
                             PtrVT, GOTReg, TGA);
  // ...
                             PtrVT, TGA, GOTPtr);
  // ...
                             PtrVT, GOTPtr, TGA, TGA);
  // ...
                             PtrVT, TLSAddr, TGA);
  // ...
  unsigned MOHiFlag, MOLoFlag;
  // ...
    if (C->isAllOnesValue() || C->isNullValue())
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  // ...
  InChain = OverflowArea.getValue(1);
  // ...
  InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
  assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
  bool isPPC64 = (PtrVT == MVT::i64);
  // ...
  Entry.Ty = IntPtrTy;
  Entry.Node = Trmp; Args.push_back(Entry);
  // ...
  Args.push_back(Entry);
  Entry.Node = FPtr; Args.push_back(Entry);
  Entry.Node = Nest; Args.push_back(Entry);
  // ...
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.second;
  uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
  // ...
  uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
  // ...
  uint64_t FPROffset = 1;
  // ...
  uint64_t nextOffset = FPROffset;
  // ...
  nextOffset += StackOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
  // ...
  SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
  // ...
  nextOffset += FrameOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
  // ...
  return DAG.getStore(thirdStore, dl, FR, nextPtr,
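// For reference, the 32-bit SVR4 va_list these stores initialize is
//   struct { char gpr; char fpr; char *overflow_arg_area;
//            char *reg_save_area; };
// which is why FPROffset is 1 (the byte after gpr) and the two later
// stores land at the pointer-aligned offsets 4 and 8.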
3150 #include "PPCGenCallingConv.inc" 3154 CCAssignFn *PPCTargetLowering::useFastISelCCs(
unsigned Flag)
const {
3155 return Flag ? CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS;
      PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  // ...
  if (RegNum != NumArgRegs && RegNum % 2 == 1) {
  // ...
      PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  // ...
  int RegsLeft = NumArgRegs - RegNum;
  // ...
  if (RegNum != NumArgRegs && RegsLeft < 4) {
    for (int i = 0; i < RegsLeft; i++) {
      // ...
    }
  }
  // ...
      PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
  // ...
  if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
  // ...
static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5,
                                PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10,
                                PPC::F11, PPC::F12, PPC::F13};

static const MCPhysReg QFPR[] = {
    PPC::QF1, PPC::QF2, PPC::QF3, PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7,
    PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13};
                                     unsigned PtrByteSize) {
  // ...
  ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
  // ...
                                          unsigned PtrByteSize) {
  unsigned Align = PtrByteSize;
  // ...
  if (BVAlign > PtrByteSize) {
    if (BVAlign % PtrByteSize != 0)
      report_fatal_error(
          "ByVal alignment is not a multiple of the pointer size");
                                 unsigned PtrByteSize,
                                 unsigned LinkageSize,
                                 unsigned ParamAreaSize,
                                 unsigned &ArgOffset,
                                 unsigned &AvailableFPRs,
                                 unsigned &AvailableVRs, bool HasQPX) {
  bool UseMemory = false;
  // ...
    ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
    // ...
    if (ArgOffset >= LinkageSize + ParamAreaSize)
      // ...
    ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
    // ...
    if (ArgOffset > LinkageSize + ParamAreaSize)
      // ...
  if (AvailableFPRs > 0) {
    // ...
  }
  if (AvailableVRs > 0) {
    // ...
  }
                                   unsigned NumBytes) {
  // ...
  unsigned AlignMask = TargetAlign - 1;
  NumBytes = (NumBytes + AlignMask) & ~AlignMask;
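// Worked example: with TargetAlign = 16 and NumBytes = 52, AlignMask is 15
// and (52 + 15) & ~15 == 64, i.e. 52 rounded up to the next multiple of 16.
// The mask trick is valid because the target stack alignment is a power of
// two.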
SDValue PPCTargetLowering::LowerFormalArguments(
    // ...
  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
    return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins,
                                       dl, DAG, InVals);
  if (Subtarget.isSVR4ABI())
    return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins,
                                       dl, DAG, InVals);
  return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins,
                                     dl, DAG, InVals);
SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
    // ...
  unsigned PtrByteSize = 4;
  // ...
  CCInfo.AllocateStack(LinkageSize, PtrByteSize);
  // ...
  CCInfo.PreAnalyzeFormalArguments(Ins);
  // ...
  CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
  CCInfo.clearWasPPCF128();

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    // ...
        RC = &PPC::GPRCRegClass;
    // ...
          RC = &PPC::VSSRCRegClass;
        else if (Subtarget.hasSPE())
          RC = &PPC::SPE4RCRegClass;
        else
          RC = &PPC::F4RCRegClass;
    // ...
          RC = &PPC::VSFRCRegClass;
        else if (Subtarget.hasSPE())
          RC = &PPC::SPERCRegClass;
        else
          RC = &PPC::F8RCRegClass;
    // ...
        RC = &PPC::VRRCRegClass;
    // ...
        RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass;
    // ...
        RC = &PPC::VRRCRegClass;
    // ...
        RC = &PPC::QFRCRegClass;
    // ...
        RC = &PPC::QBRCRegClass;
    // ...
      ArgOffset += ArgSize - ObjSize;
    // ...
  }
  // ...
  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
  // ...
  CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
  // ...
  unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
  MinReservedArea = std::max(MinReservedArea, LinkageSize);
  // ...
      PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  // ...
      PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
  // ...
    int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
    // ...
                                      CCInfo.getNextStackOffset(), true));
    // ...
    for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
      // ...
        VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
      // ...
    }
    // ...
    for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
      // ...
        VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
      // ...
    }
  // ...
  if (!MemOps.empty())
    // ...

// ...
                                 const SDLoc &dl) const {
SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
    // ...
         "fastcc not supported on varargs functions");
  // ...
  unsigned PtrByteSize = 8;
  // ...
      PPC::X3, PPC::X4, PPC::X5, PPC::X6,
      PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  // ...
      PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  // ...
  const unsigned Num_QFPR_Regs = Num_FPR_Regs;
  // ...
  bool HasParameterArea = !isELFv2ABI || isVarArg;
  unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
  unsigned NumBytes = LinkageSize;
  unsigned AvailableFPRs = Num_FPR_Regs;
  unsigned AvailableVRs = Num_VR_Regs;
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    if (Ins[i].Flags.isNest())
      continue;
    // ...
    if (CalculateStackSlotUsed(/* ... */, PtrByteSize, LinkageSize,
                               ParamAreaSize, NumBytes, AvailableFPRs,
                               AvailableVRs, /* ... */))
      HasParameterArea = true;
  }
  // ...
  unsigned ArgOffset = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
  unsigned &QFPR_idx = FPR_idx;
  // ...
  unsigned CurArgIdx = 0;
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    // ...
    bool needsLoad = false;
    EVT ObjectVT = Ins[ArgNo].VT;
    EVT OrigVT = Ins[ArgNo].ArgVT;
    // ...
    unsigned ArgSize = ObjSize;
    // ...
    if (Ins[ArgNo].isOrigArg()) {
      std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[ArgNo].getOrigArgIndex();
    }
    // ...
    unsigned CurArgOffset, Align;
    auto ComputeArgOffset = [&]() {
      // ...
      ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
      CurArgOffset = ArgOffset;
    };
    // ...
      GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
      GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
    // ...
      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
      // ...
      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      // ...
      if (HasParameterArea ||
          ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
        // ...
      if (ObjSize < PtrByteSize) {
        // ...
        if (!isLittleEndian) {
          // ...
        }
        // ...
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
          // ...
          if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
            // ...
          }
        }
        ArgOffset += PtrByteSize;
        // ...
      }
      // ...
      for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
        if (GPR_idx == Num_GPR_Regs)
          // ...
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
        // ...
      }
      ArgOffset += ArgSize;
      // ...
        unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
        // ...
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
      // ...
      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
        // ...
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
      }
      // ...
      ArgSize = PtrByteSize;
      // ...
      if (FPR_idx != Num_FPR_Regs) {
        // ...
                           ? &PPC::VSSRCRegClass
                           : &PPC::F4RCRegClass);
        // ...
                           ? &PPC::VSFRCRegClass
                           : &PPC::F8RCRegClass);
        // ...
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
        // ...
        if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
          // ...
      }
      // ...
      ArgOffset += ArgSize;
      // ...
      ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      // ...
      if (!Subtarget.hasQPX()) {
        // ...
        if (VR_idx != Num_VR_Regs) {
          unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
          // ...
        }
        // ...
      }
      // ...
             "Invalid QPX parameter type");
      // ...
      if (QFPR_idx != Num_QFPR_Regs) {
        // ...
        case MVT::v4f64: RC = &PPC::QFRCRegClass; break;
        case MVT::v4f32: RC = &PPC::QSRCRegClass; break;
        default:         RC = &PPC::QBRCRegClass; break;
        // ...
        unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC);
        // ...
      }
    // ...
      if (ObjSize < ArgSize && !isLittleEndian)
        CurArgOffset += ArgSize - ObjSize;
    // ...
  }
  // ...
  unsigned MinReservedArea;
  if (HasParameterArea)
    MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
  else
    MinReservedArea = LinkageSize;
  // ...
    int Depth = ArgOffset;
    // ...
    for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
         GPR_idx < Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
      // ...
    }
  // ...
  if (!MemOps.empty())
    // ...
SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
    // ...
  unsigned PtrByteSize = isPPC64 ? 8 : 4;
  // ...
  unsigned ArgOffset = LinkageSize;
  // ...
  unsigned MinReservedArea = ArgOffset;
  // ...
      PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  // ...
      PPC::X3, PPC::X4, PPC::X5, PPC::X6,
      PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  // ...
      PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  // ...
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
  // ...
  unsigned VecArgOffset = ArgOffset;
  if (!isVarArg && !isPPC64) {
    for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
         // ...
      EVT ObjectVT = Ins[ArgNo].VT;
      // ...
          ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      VecArgOffset += ArgSize;
      // ...
    }
  }
  // ...
  VecArgOffset = ((VecArgOffset+15)/16)*16;
  VecArgOffset += 12*16;
  // ...
  unsigned nAltivecParamsAtEnd = 0;
  // ...
  unsigned CurArgIdx = 0;
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    // ...
    bool needsLoad = false;
    EVT ObjectVT = Ins[ArgNo].VT;
    // ...
    unsigned ArgSize = ObjSize;
    // ...
    if (Ins[ArgNo].isOrigArg()) {
      std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[ArgNo].getOrigArgIndex();
    }
    unsigned CurArgOffset = ArgOffset;
    // ...
      if (isVarArg || isPPC64) {
        MinReservedArea = ((MinReservedArea+15)/16)*16;
        // ...
      } else
        nAltivecParamsAtEnd++;
    // ...
      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
      // ...
      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      // ...
      if (ObjSize==1 || ObjSize==2) {
        CurArgOffset = CurArgOffset + (4 - ObjSize);
      }
      // ...
      if (ObjSize==1 || ObjSize==2) {
        if (GPR_idx != Num_GPR_Regs) {
          // ...
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
          // ...
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
          // ...
        }
        ArgOffset += PtrByteSize;
        // ...
      }
      for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
        // ...
        if (GPR_idx != Num_GPR_Regs) {
          // ...
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
          // ...
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
          // ...
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
          // ...
        }
      }
      // ...
      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
        // ...
      }
      // ...
        ArgSize = PtrByteSize;
      // ...
      ArgOffset += PtrByteSize;
      // ...
      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
        // ...
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
        // ...
      }
      // ...
        ArgSize = PtrByteSize;
      // ...
      if (GPR_idx != Num_GPR_Regs) {
        // ...
        if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
          // ...
      }
      if (FPR_idx != Num_FPR_Regs) {
        // ...
          VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
        // ...
          VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
        // ...
      }
      // ...
      ArgOffset += isPPC64 ? 8 : ObjSize;
      // ...
      if (VR_idx != Num_VR_Regs) {
        unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
        // ...
      }
      // ...
      while ((ArgOffset % 16) != 0) {
        ArgOffset += PtrByteSize;
        if (GPR_idx != Num_GPR_Regs)
          // ...
      }
      // ...
      GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs);
      // ...
      if (!isVarArg && !isPPC64) {
        // ...
        CurArgOffset = VecArgOffset;
        // ...
      } else {
        // ...
        ArgOffset = ((ArgOffset+15)/16)*16;
        CurArgOffset = ArgOffset;
        // ...
      }
    // ...
                                   CurArgOffset + (ArgSize - ObjSize),
    // ...
  }
  // ...
  if (nAltivecParamsAtEnd) {
    MinReservedArea = ((MinReservedArea+15)/16)*16;
    MinReservedArea += 16*nAltivecParamsAtEnd;
  }
  // ...
  MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
  // ...
    int Depth = ArgOffset;
    // ...
    for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
      // ...
        VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
      // ...
        VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
      // ...
    }
  // ...
  if (!MemOps.empty())
    // ...
                                    unsigned ParamSize) {
  // ...
  if (!isTailCall)
    return 0;
  // ...
  int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
  // ...
  if (SPDiff < FI->getTailCallSPDelta())
    // ...
  if (const auto *F = dyn_cast<Function>(GV)) {
  const unsigned PtrByteSize = 8;
  // ...
      PPC::X3, PPC::X4, PPC::X5, PPC::X6,
      PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  // ...
      PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  // ...
  const unsigned NumFPRs = 13;
  // ...
  const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
  // ...
  unsigned NumBytes = LinkageSize;
  unsigned AvailableFPRs = NumFPRs;
  unsigned AvailableVRs = NumVRs;
  // ...
    if (Param.Flags.isNest())
      continue;
    // ...
    if (CalculateStackSlotUsed(/* ... */, PtrByteSize, LinkageSize,
                               ParamAreaSize, NumBytes, AvailableFPRs,
                               AvailableVRs, /* ... */))
      // ...
  // ...
  for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
    const Value *CalleeArg = *CalleeArgIter;
    const Value *CallerArg = &(*CallerArgIter);
    if (CalleeArg == CallerArg)
      continue;
    // ...
        isa<UndefValue>(CalleeArg))
      // ...
  }
  // ...
  if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
    // ...
bool
PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
    // ...
  if (DisableSCO && !TailCallOpt)
    return false;
  // ...
  if (isVarArg)
    return false;
  // ...
  if (Caller.getCallingConv() != CalleeCC &&
      // ...
      !isa<ExternalSymbolSDNode>(Callee))
bool
PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
    // ...
  for (unsigned i = 0; i != Ins.size(); i++) {
    // ...
    if (Flags.isByVal())
      return false;
  }
  // ...
    return G->getGlobal()->hasHiddenVisibility() ||
           G->getGlobal()->hasProtectedVisibility();
  if (!C)
    return nullptr;
  // ...
  if ((Addr & 3) != 0 ||                       // Low 2 bits are implicitly zero.
      SignExtend32<26>(Addr) != Addr)
    return nullptr;                            // Top 6 bits must sign-extend.
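// Worked example: the absolute-call encoding holds a 26-bit signed,
// 4-byte-aligned target. 0x01FFFFFC passes both tests, while 0x02000000
// does not survive SignExtend32<26> (bit 25 is its sign bit), and any
// address with the low two bits set fails the alignment check.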
struct TailCallArgumentInfo {
  // ...
  TailCallArgumentInfo() = default;
};

// ...
  for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
    // ...
    SDValue FIN = TailCallArgs[i].FrameIdxOp;
    int FI = TailCallArgs[i].FrameIdx;
    // ...
        Chain, dl, Arg, FIN,
    // ...
  }
                                        int SPDiff, const SDLoc &dl) {
  // ...
  bool isPPC64 = Subtarget.isPPC64();
  int SlotSize = isPPC64 ? 8 : 4;
  int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
  // ...
                                           NewRetAddrLoc, true);
  // ...
  Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
  // ...
    int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset();
    // ...
    Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx,
  // ...
  int Offset = ArgOffset + SPDiff;
  // ...
  TailCallArgumentInfo Info;
  // ...
  Info.FrameIdxOp = FIN;
  // ...
SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
    // ...
  LROpOut = getReturnAddrFrameIndex(DAG);
  // ...
  if (Subtarget.isDarwinABI()) {
    FPOpOut = getFramePointerFrameIndex(DAG);
    // ...
  }
                              SDValue PtrOff, int SPDiff, unsigned ArgOffset,
                              bool isPPC64,
  // ...
                              const SDLoc &dl, int SPDiff, unsigned NumBytes,
                              SDValue LROp,
  // ...
  if (!MemOpChains2.empty())
    // ...
  return G->getGlobal()->getValueType()->isFunctionTy();
                            SDValue CallSeqStart, const SDLoc &dl, int SPDiff,
                            bool isTailCall, bool isPatchPoint, bool hasNest,
  // ...
  bool isPPC64 = Subtarget.isPPC64();
  // ...
  bool needIndirectCall = true;
  if (!isSVR4ABI || !isPPC64)
    // ...
      needIndirectCall = false;
  // ...
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee))
    GV = G->getGlobal();
  // ...
  bool UsePlt = !Local && Subtarget.isTargetELF() && !isPPC64;
  // ...
    unsigned OpFlags = 0;
    // ...
    needIndirectCall = false;
  // ...
    unsigned char OpFlags = 0;
    // ...
    needIndirectCall = false;
  // ...
    needIndirectCall = false;
  // ...
  if (needIndirectCall) {
    // ...
    SDValue MTCTROps[] = {Chain, Callee, InFlag};
    // ...
    if (isSVR4ABI && isPPC64 && !isELFv2ABI) {
      // ...
      MTCTROps[0] = Chain;
      MTCTROps[1] = LoadFuncPtr;
      MTCTROps[2] = InFlag;
      // ...
    }
    // ...
  }
  // ...
  if (isSVR4ABI && isPPC64 && !isELFv2ABI && !hasNest)
    // ...
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    // ...
                                RegsToPass[i].second.getValueType()));
  // ...
  if (isSVR4ABI && isPPC64) {
    // ...
  }
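// Background for the ELFv1 (!isELFv2ABI) path above: under the 64-bit
// ELFv1 ABI a function pointer addresses a descriptor of three doublewords
// { entry point, TOC pointer, environment pointer }, so an indirect call
// must load the real entry address into CTR and refresh r2 (and r11 for
// nested functions) from the descriptor - that is what the LoadFuncPtr
// value spliced into MTCTROps prepares.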
SDValue PPCTargetLowering::LowerCallResult(
    // ...
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    // ...
SDValue PPCTargetLowering::FinishCall(
    // ...
  std::vector<EVT> NodeTys;
  // ...
  unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl,
                                 SPDiff, isTailCall, isPatchPoint, hasNest,
                                 RegsToPass, Ops, NodeTys, CS, Subtarget);
  // ...
  if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64())
    // ...
  int BytesCalleePops =
      // ...
  assert(Mask && "Missing call preserved mask for calling convention");
  // ...
           cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
          // ...
          isa<ConstantSDNode>(Callee)) &&
         "Expecting a global address, external symbol, absolute value or register");
  // ...
  if (!isTailCall && Subtarget.isSVR4ABI() && Subtarget.isPPC64() &&
      // ...
    unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
    // ...
  Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
  // ...
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
                         Ins, dl, DAG, InVals);

// ...
  else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
    isTailCall =
        IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS,
                                                 isVarArg, Outs, Ins, DAG);
  else
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
                                                   // ...
  // ...
    assert(isa<GlobalAddressSDNode>(Callee) &&
           "Callee should be an llvm::Function object.");
    // ...
        cast<GlobalAddressSDNode>(Callee)->getGlobal();
    const unsigned Width =
        80 - strlen("TCO caller: ") - strlen(", callee linkage: 0, 0");
    dbgs() << "TCO caller: "
           // ...
           << ", callee linkage: " << GV->getVisibility() << ", "
           << GV->getLinkage() << "\n");
  // ...
         "site marked musttail");
  // ...
  if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
      // ...
    Callee = LowerGlobalAddress(Callee, DAG);

  if (Subtarget.isSVR4ABI()) {
    if (Subtarget.isPPC64())
      return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
                              isTailCall, isPatchPoint, Outs, OutVals, Ins,
                              dl, DAG, InVals, CS);
    // ...
    return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
                            isTailCall, isPatchPoint, Outs, OutVals, Ins,
                            dl, DAG, InVals, CS);
  }

  return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
                          isTailCall, isPatchPoint, Outs, OutVals, Ins,
                          dl, DAG, InVals, CS);
SDValue PPCTargetLowering::LowerCall_32SVR4(
    // ...
    bool isTailCall, bool isPatchPoint,
    // ...
  unsigned PtrByteSize = 4;
  // ...
  CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
                       PtrByteSize);
  // ...
  CCInfo.PreAnalyzeCallOperands(Outs);
  // ...
  unsigned NumArgs = Outs.size();

  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT ArgVT = Outs[i].VT;
    // ...
    if (Outs[i].IsFixed) {
      // ...
    }
    // ...
      errs() << "Call operand #" << i << " has unhandled type "
      // ...
  }
  // ...
  CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
  // ...
  CCInfo.clearWasPPCF128();
  // ...
  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
  // ...
  CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
  // ...
  unsigned NumBytes = CCByValInfo.getNextStackOffset();
  // ...
    Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
  // ...
  bool seenFloatArg = false;
  // ...
  for (unsigned i = 0, j = 0, e = ArgLocs.size();
       // ...
      assert((j < ByValArgLocs.size()) && "Index out of bounds!");
      // ...
      SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
      // ...
      DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
                             NewCallSeqStart.getNode());
      Chain = CallSeqStart = NewCallSeqStart;
    // ...
  }
  // ...
  if (!MemOpChains.empty())
    // ...
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    // ...
                             RegsToPass[i].second, InFlag);
    // ...
  }
  // ...
    SDValue Ops[] = { Chain, InFlag };
  // ...
  return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
                    /* ... */
                    RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
                    NumBytes, Ins, InVals, CS);
SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
    // ...
  SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
  // ...
  DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
                         // ...
  return NewCallSeqStart;
SDValue PPCTargetLowering::LowerCall_64SVR4(
    // ...
    bool isTailCall, bool isPatchPoint,
    // ...
  bool isELFv2ABI = Subtarget.isELFv2ABI();
  bool isLittleEndian = Subtarget.isLittleEndian();
  unsigned NumOps = Outs.size();
  bool hasNest = false;
  bool IsSibCall = false;
  // ...
  unsigned PtrByteSize = 8;
  // ...
         "fastcc not supported on varargs functions");
  // ...
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned NumBytes = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
  unsigned &QFPR_idx = FPR_idx;
  // ...
      PPC::X3, PPC::X4, PPC::X5, PPC::X6,
      PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  // ...
      PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  // ...
  const unsigned NumQFPRs = NumFPRs;
  // ...
  bool HasParameterArea =
      !isELFv2ABI || isVarArg || CallConv == CallingConv::Fast;
  if (!HasParameterArea) {
    unsigned ParamAreaSize = NumGPRs * PtrByteSize;
    unsigned AvailableFPRs = NumFPRs;
    unsigned AvailableVRs = NumVRs;
    unsigned NumBytesTmp = NumBytes;
    for (unsigned i = 0; i != NumOps; ++i) {
      if (Outs[i].Flags.isNest())
        continue;
      if (CalculateStackSlotUsed(/* ... */, PtrByteSize, LinkageSize,
                                 ParamAreaSize, NumBytesTmp, AvailableFPRs,
                                 AvailableVRs, Subtarget.hasQPX()))
        HasParameterArea = true;
    }
  }
  // ...
  unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
  // ...
    HasParameterArea = false;
  // ...
  for (unsigned i = 0; i != NumOps; ++i) {
    // ...
    EVT ArgVT = Outs[i].VT;
    EVT OrigVT = Outs[i].ArgVT;
    // ...
      if (NumGPRsUsed > NumGPRs)
        HasParameterArea = true;
    // ...
      if (++NumGPRsUsed <= NumGPRs)
        continue;
      // ...
      if (++NumVRsUsed <= NumVRs)
        continue;
      // ...
      if (Subtarget.hasQPX()) {
        if (++NumFPRsUsed <= NumFPRs)
          continue;
      } else {
        if (++NumVRsUsed <= NumVRs)
          continue;
      }
      // ...
      if (++NumFPRsUsed <= NumFPRs)
        continue;
    // ...
    HasParameterArea = true;
    // ...
    NumBytes = ((NumBytes + Align - 1) / Align) * Align;
    // ...
    NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
  }

  unsigned NumBytesActuallyUsed = NumBytes;
  // ...
  if (HasParameterArea)
    NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
  else
    NumBytes = LinkageSize;
  // ...
    Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
  // ...
  unsigned ArgOffset = LinkageSize;
  // ...
  for (unsigned i = 0; i != NumOps; ++i) {
    // ...
    EVT ArgVT = Outs[i].VT;
    EVT OrigVT = Outs[i].ArgVT;
    // ...
    auto ComputePtrOff = [&]() {
      // ...
      ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
      // ...
    };
    // ...
      GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
      GPR_idx = std::min(GPR_idx, NumGPRs);
    // ...
      if (Size==1 || Size==2 || Size==4) {
        // ...
        if (GPR_idx != NumGPRs) {
          // ...
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          // ...
          ArgOffset += PtrByteSize;
        }
      }

      if (GPR_idx == NumGPRs && Size < 8) {
        // ...
        if (!isLittleEndian) {
          // ...
        }
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
        // ...
        ArgOffset += PtrByteSize;
      }
      // ...
      Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
      // ...
      if (Size < 8 && GPR_idx != NumGPRs) {
        // ...
        if (!isLittleEndian) {
          // ...
          Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
          // ...
        }
        // ...
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
        // ...
        ArgOffset += PtrByteSize;
      }
      // ...
      for (unsigned j = 0; j < Size; j += PtrByteSize) {
        // ...
        if (GPR_idx != NumGPRs) {
          // ...
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
          break;
        }
      }
      // ...
        RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
      // ...
      if (GPR_idx != NumGPRs) {
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
      } else {
        // ...
        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
        LowerMemOpCallTo(/* ... */, true, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);
        // ...
          ArgOffset += PtrByteSize;
      }
      ArgOffset += PtrByteSize;
    // ...
      bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs;
      bool NeededLoad = false;
      // ...
      if (FPR_idx != NumFPRs)
        RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
      // ...
      if (!NeedGPROrStack)
        // ...
      } else if (ArgOffset % PtrByteSize != 0) {
        // ...
        if (!isLittleEndian)
          // ...
        if (!isLittleEndian)
          // ...
      }
      // ...
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
      // ...
        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
        LowerMemOpCallTo(/* ... */, true, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);
      // ...
        ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
    // ...
      if (!Subtarget.hasQPX()) {
        // ...
          assert(HasParameterArea &&
                 "Parameter area must exist if we have a varargs call.");
          // ...
          if (VR_idx != NumVRs) {
            // ...
            RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
          }
          // ...
          for (unsigned i = 0; i < 16; i += PtrByteSize) {
            if (GPR_idx == NumGPRs)
              break;
            // ...
            RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          }
        // ...
        if (VR_idx != NumVRs) {
          RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
        } else {
          // ...
          assert(HasParameterArea &&
                 "Parameter area must exist to pass an argument in memory.");
          LowerMemOpCallTo(/* ... */, true, isTailCall, true, MemOpChains,
                           TailCallArguments, dl);
        }
        // ...
      }
      // ...
             "Invalid QPX parameter type");
      // ...
        assert(HasParameterArea &&
               "Parameter area must exist if we have a varargs call.");
        // ...
        if (QFPR_idx != NumQFPRs) {
          // ...
          RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load));
        }
        ArgOffset += (IsF32 ? 16 : 32);
        for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) {
          if (GPR_idx == NumGPRs)
            break;
          // ...
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
        }
      // ...
      if (QFPR_idx != NumQFPRs) {
        RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg));
      } else {
        // ...
        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
        LowerMemOpCallTo(/* ... */, true, isTailCall, true, MemOpChains,
                         TailCallArguments, dl);
        // ...
        ArgOffset += (IsF32 ? 16 : 32);
      }
      // ...
      ArgOffset += (IsF32 ? 16 : 32);
  }

  assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
         "mismatch in size of parameter area");
  (void)NumBytesActuallyUsed;

  if (!MemOpChains.empty())
    // ...
  if (!isTailCall && !isPatchPoint &&
      // ...
      !isa<ExternalSymbolSDNode>(Callee)) {
    // ...
    unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
    // ...
  }
  // ...
  if (isELFv2ABI && !isPatchPoint)
    RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
  // ...
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    // ...
                             RegsToPass[i].second, InFlag);
    // ...
  }

  if (isTailCall && !IsSibCall)
    // ...
  return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, hasNest,
                    DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee,
                    SPDiff, NumBytes, Ins, InVals, CS);
SDValue PPCTargetLowering::LowerCall_Darwin(
    // ...
    bool isTailCall, bool isPatchPoint,
    // ...
  unsigned NumOps = Outs.size();
  // ...
  unsigned PtrByteSize = isPPC64 ? 8 : 4;
  // ...
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned NumBytes = LinkageSize;
  // ...
  unsigned nAltivecParamsAtEnd = 0;
  for (unsigned i = 0; i != NumOps; ++i) {
    // ...
    EVT ArgVT = Outs[i].VT;
    // ...
      if (!isVarArg && !isPPC64) {
        // ...
        nAltivecParamsAtEnd++;
        // ...
      }
      // ...
      NumBytes = ((NumBytes+15)/16)*16;
    // ...
  }
  // ...
  if (nAltivecParamsAtEnd) {
    NumBytes = ((NumBytes+15)/16)*16;
    NumBytes += 16*nAltivecParamsAtEnd;
  }
  // ...
  NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
  // ...
    Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
  // ...
  unsigned ArgOffset = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
  // ...
      PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  // ...
      PPC::X3, PPC::X4, PPC::X5, PPC::X6,
      PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  // ...
      PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  // ...
  const unsigned NumFPRs = 13;
  // ...
  for (unsigned i = 0; i != NumOps; ++i) {
    // ...
      if (Size==1 || Size==2) {
        // ...
        if (GPR_idx != NumGPRs) {
          // ...
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          // ...
          ArgOffset += PtrByteSize;
        } else {
          // ...
          Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
          // ...
          ArgOffset += PtrByteSize;
        }
        // ...
      }
      // ...
      Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
      // ...
      for (unsigned j = 0; j < Size; j += PtrByteSize) {
        // ...
        if (GPR_idx != NumGPRs) {
          // ...
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
          // ...
        }
      }
    // ...
      if (GPR_idx != NumGPRs) {
        // ...
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
      } else {
        LowerMemOpCallTo(/* ... */, isPPC64, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);
      }
      ArgOffset += PtrByteSize;
    // ...
      if (FPR_idx != NumFPRs) {
        RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
        // ...
          if (GPR_idx != NumGPRs) {
            // ...
            RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
            // ...
            RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          }
          // ...
          if (GPR_idx != NumGPRs)
            // ...
      } else {
        LowerMemOpCallTo(/* ... */, isPPC64, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);
      }
    // ...
        while (ArgOffset % 16 != 0) {
          ArgOffset += PtrByteSize;
          if (GPR_idx != NumGPRs)
            // ...
        }
        // ...
        if (VR_idx != NumVRs) {
          // ...
          RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
        }
        // ...
        for (unsigned i = 0; i < 16; i += PtrByteSize) {
          if (GPR_idx == NumGPRs)
            // ...
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
        }
      // ...
      if (VR_idx != NumVRs) {
        // ...
        RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
      } else if (nAltivecParamsAtEnd==0) {
        LowerMemOpCallTo(/* ... */, isPPC64, isTailCall, true, MemOpChains,
                         TailCallArguments, dl);
      }
    // ...
  }
  // ...
  if (!isVarArg && nAltivecParamsAtEnd > NumVRs) {
    // ...
    ArgOffset = ((ArgOffset+15)/16)*16;
    // ...
    for (unsigned i = 0; i != NumOps; ++i) {
      // ...
      EVT ArgType = Outs[i].VT;
      // ...
        LowerMemOpCallTo(/* ... */, isPPC64, isTailCall, true, MemOpChains,
                         TailCallArguments, dl);
      // ...
    }
  }
  // ...
  if (!MemOpChains.empty())
    // ...
      !isa<ExternalSymbolSDNode>(Callee) &&
      // ...
    RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12
                                                           : PPC::R12),
                                        Callee));
  // ...
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    // ...
                             RegsToPass[i].second, InFlag);
    // ...
  }
  // ...
  return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
                    /* ... */
                    RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
                    NumBytes, Ins, InVals, CS);
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  // ...
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
  // ...
    if (PPC::G8RCRegClass.contains(*I))
      // ...
    else if (PPC::F8RCRegClass.contains(*I))
      // ...
    else if (PPC::CRRCRegClass.contains(*I))
      // ...
    else if (PPC::VRRCRegClass.contains(*I))
      // ...
SDValue
PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
    // ...
  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
  // ...
  SDValue Ops[2] = {Chain, FPSIdx};
  // ...
  bool isPPC64 = Subtarget.isPPC64();
  unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
  // ...
  bool isPPC64 = Subtarget.isPPC64();
  // ...
  int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
  // ...
  FI->setReturnAddrSaveIndex(RASI);
  // ...
SDValue
PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG &DAG) const {
  // ...
  bool isPPC64 = Subtarget.isPPC64();
  // ...
  int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
  // ...
  FI->setFramePointerSaveIndex(FPSI);
  // ...
  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
  // ...
  SDValue Ops[3] = { Chain, NegSize, FPSIdx };
  // ...
  bool isPPC64 = Subtarget.isPPC64();
    return LowerVectorLoad(Op, DAG);
  // ...
         "Custom lowering only for i1 loads");
  // ...
    return LowerVectorStore(Op, DAG);
  // ...
         "Custom lowering only for i1 stores");
  // ...
         "Custom lowering only for i1 results");
void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
                                               SelectionDAG &DAG,
                                               const SDLoc &dl) const {
  // ...
  if (Src.getValueType() == MVT::f32)
    // ...
         "i64 FP_TO_UINT is supported only with FPCVT");
  // ...
  int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
  // ...
    MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
  // ...
                                              const SDLoc &dl) const {
  // ...
  if (Src.getValueType() == MVT::f32)
    // ...
         "i64 FP_TO_UINT is supported only with FPCVT");
  // ...
                                          const SDLoc &dl) const {
  // ...
  const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
  // ...
  if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
    return LowerFP_TO_INTDirectMove(Op, DAG, dl);
  // ...
  LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
  // ...
                     RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
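// TwoE31 above is the IEEE-754 double 2^31. Roughly, an f64->u32 convert
// without FPCVT is assembled from the signed fctiwz: inputs below 2^31
// convert directly, larger inputs are converted as (x - 2^31) with the
// high result bit restored afterwards, selected by a compare against this
// constant.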
bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
    // ...
  LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
  // ...
         "Non-pre-inc AM on PPC?");
void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
    // ...
  SDLoc dl(NewResChain);
  // ...
         "A new TF really is required here");
bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
  // ...
  if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
    // ...
    if (UI.getUse().get().getResNo() != 0)
      // ...
                                                   const SDLoc &dl) const {
  // ...
         "Invalid floating point type as target of conversion");
  assert(Subtarget.hasFPCVT() &&
         "Int to FP conversions with direct moves require FPCVT");
  // ...
  for (unsigned i = 1; i < NumConcat; ++i)
    // ...
                                                const SDLoc &dl) const {
  // ...
         "Unexpected conversion type");
  // ...
         "Supports conversions to v2f64/v4f32 only.");
  // ...
  for (unsigned i = 0; i < WideNumElts; ++i)
    // ...
  int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2;
  int SaveElts = FourEltRes ? 4 : 2;
  if (Subtarget.isLittleEndian())
    for (int i = 0; i < SaveElts; i++)
      ShuffV[i * Stride] = i;
  else
    for (int i = 1; i <= SaveElts; i++)
      ShuffV[i * Stride - 1] = i - 1;
  // ...
  if (!Subtarget.hasP9Altivec() && SignedConv) {
    Arrange = DAG.getBitcast(IntermediateVT, Arrange);
    // ...
    Extend = DAG.getNode(ExtendOp, dl, IntermediateVT, Arrange);
  }
    return LowerINT_TO_FPVector(Op, DAG, dl);
  // ...
  if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
      Subtarget.isPPC64() && Subtarget.hasFPCVT())
    return LowerINT_TO_FPDirectMove(Op, DAG, dl);
  // ...
         "UINT_TO_FP is supported only with FPCVT");
  // ...
      !Subtarget.hasFPCVT() &&
  // ...
    if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
      // ...
                         RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
      spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
    } else if (Subtarget.hasLFIWAX() &&
               // ...
                            RLI.Alignment, RLI.AAInfo, RLI.Ranges);
      SDValue Ops[] = { RLI.Chain, RLI.Ptr };
      // ...
      spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
    } else if (Subtarget.hasFPCVT() &&
               // ...
                            RLI.Alignment, RLI.AAInfo, RLI.Ranges);
      SDValue Ops[] = { RLI.Chain, RLI.Ptr };
      // ...
      spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
    } else if (((Subtarget.hasLFIWAX() &&
                 // ...
                (Subtarget.hasFPCVT() &&
                 // ...
             "Expected an i32 store");
      // ...
                            RLI.Alignment, RLI.AAInfo, RLI.Ranges);
      SDValue Ops[] = { RLI.Chain, RLI.Ptr };
      // ...
    }
  // ...
         "Unhandled INT_TO_FP type in custom expander!");
  // ...
  if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
    // ...
           "Expected an i32 store");
    // ...
                          RLI.Alignment, RLI.AAInfo, RLI.Ranges);
    SDValue Ops[] = { RLI.Chain, RLI.Ptr };
    // ...
    spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
  } else {
    assert(Subtarget.isPPC64() &&
           "i32->FP without LFIWAX supported only on PPC64");
  SDValue OutOps[] = { OutLo, OutHi };
  // ...
  SDValue OutOps[] = { OutLo, OutHi };
  // ...
  SDValue OutOps[] = { OutLo, OutHi };
  assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");
  // ...
  static const MVT VTys[] = {
  // ...
  EVT CanonicalVT = VTys[SplatSize-1];
  for (unsigned i = 0; i != 16; ++i)
    // ...
  bool IsSplat = true;
  bool IsLoad = false;
  // ...
  return !(IsSplat && IsLoad);
  assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
  // ...
           "BUILD_VECTOR for v4i1 does not have 4 operands");
    // ...
    for (unsigned i = 0; i < 4; ++i) {
      // ...
      if (!isa<ConstantSDNode>(BVN->getOperand(i))) {
        // ...
      }
    }
    // ...
    for (unsigned i = 0; i < 4; ++i) {
      // ...
    }
    // ...
    for (unsigned i = 0; i < 4; ++i) {
      // ...
    }
    // ...
    if (StoreSize > 4) {
      // ...
    }
    // ...
    if (!Stores.empty())
      // ...
  if (Subtarget.hasQPX())
    // ...
  APInt APSplatBits, APSplatUndef;
  unsigned SplatBitSize;
  // ...
                            HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
      SplatBitSize > 32) {
    // ...
    if (Subtarget.hasVSX() &&
        // ...
         Subtarget.hasP8Vector()))
      // ...
  }

  unsigned SplatSize = SplatBitSize / 8;
  // ...
  if (SplatBits == 0) {
    // ...
  }
  // ...
  if (Subtarget.hasP9Vector() && SplatSize == 1) {
    // ...
  }
  // ...
  int32_t SextVal = (int32_t(SplatBits << (32-SplatBitSize)) >>
                     (32-SplatBitSize));
  if (SextVal >= -16 && SextVal <= 15)
    // ...
  if (SextVal >= -32 && SextVal <= 31) {
    // ...
  }
  // ...
  if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
    // ...
  }
  // ...
  static const signed char SplatCsts[] = {
    -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
    -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
  };
  // ...
    int i = SplatCsts[idx];
    // ...
    unsigned TypeShiftAmt = i & (SplatBitSize-1);

    // vsplti + shl self.
    if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
      // ...
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
      // ...
    }

    // vsplti + srl self.
    if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
      // ...
      static const unsigned IIDs[] = {
      // ...
    }

    // vsplti + sra self.
    if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
      // ...
      static const unsigned IIDs[] = {
      // ...
    }

    // vsplti + rol self.
    if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
                         ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
      // ...
      static const unsigned IIDs[] = {
      // ...
    }

    // t = vsplti c, result = vsldoi t, t, 1
    if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
      // ...
      unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
      // ...
    }
    // t = vsplti c, result = vsldoi t, t, 2
    if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
      // ...
      unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
      // ...
    }
    // t = vsplti c, result = vsldoi t, t, 3
    if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
      // ...
      unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
      // ...
    }
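// Worked example for the shifted-splat match: a v8i16 splat of 0x8000 is
// not a 5-bit vsplti immediate, but with i == -2 we get
// (unsigned)-2 << (-2 & 15) == 0xFFFF8000, the sign-extension of 0x8000,
// so the code can emit vspltish(-2) followed by vslh of the register by
// itself (each 0xFFFE element supplies the shift amount 14), leaving
// 0x8000 in every lane.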
  unsigned OpNum  = (PFEntry >> 26) & 0x0F;
  unsigned LHSID  = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID  = (PFEntry >>  0) & ((1 << 13)-1);
  // ...
  if (OpNum == OP_COPY) {
    if (LHSID == (1*9+2)*9+3)
      return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    // ...
  }
  // ...
    ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
    ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
    ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
    ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
  // ...
    ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
    ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
    ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
    ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
  // ...
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+0;
  // ...
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+4;
  // ...
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+8;
  // ...
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+12;
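// PFEntry, decoded above, packs a whole expansion plan into one word:
// bits [31:30] hold the cost, [29:26] the operation, and two 13-bit fields
// the operand IDs. Each ID is a 4-element mask written in base 9 (digits
// 0-8, where 8 means undef), so OP_COPY compares against (1*9+2)*9+3 - the
// encoding of <0,1,2,3> - and ((4*9+5)*9+6)*9+7 for <4,5,6,7>.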
  const unsigned BytesInVector = 16;
  bool IsLE = Subtarget.isLittleEndian();
  // ...
  unsigned ShiftElts = 0, InsertAtByte = 0;
  // ...
  unsigned LittleEndianShifts[] = {8, 7, 6, 5, 4, 3, 2, 1,
                                   0, 15, 14, 13, 12, 11, 10, 9};
  unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
                                1, 2, 3, 4, 5, 6, 7, 8};
  // ...
  int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
  // ...
  bool FoundCandidate = false;
  // ...
  unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
  // ...
  for (unsigned i = 0; i < BytesInVector; ++i) {
    unsigned CurrentElement = Mask[i];
    // ...
    if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
      // ...
    bool OtherElementsInOrder = true;
    // ...
    for (unsigned j = 0; j < BytesInVector; ++j) {
      // ...
          (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
      if (Mask[j] != OriginalOrder[j] + MaskOffset) {
        OtherElementsInOrder = false;
        // ...
      }
    }
    // ...
    if (OtherElementsInOrder) {
      // ...
      ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
                       : BigEndianShifts[CurrentElement & 0xF];
      Swap = CurrentElement < BytesInVector;
      // ...
      InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
      FoundCandidate = true;
      // ...
    }
  }

  if (!FoundCandidate)
    // ...
  const unsigned NumHalfWords = 8;
  const unsigned BytesInVector = NumHalfWords * 2;
  // ...
  bool IsLE = Subtarget.isLittleEndian();
  // ...
  unsigned ShiftElts = 0, InsertAtByte = 0;
  // ...
  unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
  unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};
  // ...
  uint32_t OriginalOrderLow = 0x1234567;
  uint32_t OriginalOrderHigh = 0x89ABCDEF;
  // ...
  for (unsigned i = 0; i < NumHalfWords; ++i) {
    unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
    // ...
  }
  // ...
  bool FoundCandidate = false;
  // ...
  for (unsigned i = 0; i < NumHalfWords; ++i) {
    unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
    uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
    uint32_t MaskOtherElts = ~(0xF << MaskShift);
    // ...
      unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
      TargetOrder = OriginalOrderLow;
      // ...
      if (MaskOneElt == VINSERTHSrcElem &&
          (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
        InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
        FoundCandidate = true;
        // ...
      }
    // ...
          (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
      // ...
      if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
        // ...
        ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
                         : BigEndianShifts[MaskOneElt & 0x7];
        InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
        Swap = MaskOneElt < NumHalfWords;
        FoundCandidate = true;
        // ...
      }
  }

  if (!FoundCandidate)
    // ...
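// The halfword masks here are packed four bits per element, most
// significant nibble first, so the identity order 0..7 reads 0x01234567;
// a candidate insert position is then tested with a single masked compare
// against TargetOrder instead of re-walking all eight elements.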
  bool isLittleEndian = Subtarget.isLittleEndian();
  // ...
  unsigned ShiftElts, InsertAtByte;
  // ...
  if (Subtarget.hasP9Vector() &&
      // ...
  if (Subtarget.hasP9Altivec()) {
    SDValue NewISDNode;
    if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
      return NewISDNode;

    if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
      return NewISDNode;
  }

  if (Subtarget.hasVSX() &&
      // ...
  if (Subtarget.hasVSX() &&
      // ...
  if (Subtarget.hasP9Vector()) {
    // ...
  }

  if (Subtarget.hasVSX()) {
    // ...
  }

  if (Subtarget.hasQPX()) {
    if (VT.getVectorNumElements() != 4)
      // ...
    if (AlignIdx != -1) {
      // ...
      if (SplatIdx >= 4) {
        // ...
      }
      // ...
      for (unsigned i = 0; i < 4; ++i) {
        // ...
        unsigned mm = m >= 0 ? (unsigned)m : i;
        idx |= mm << (3-i)*3;
      }
      // ...
  }
  // ...
      (Subtarget.hasP8Altivec() && (
  // ...
  unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
  // ...
      (Subtarget.hasP8Altivec() && (
  // ...
  unsigned PFIndexes[4];
  bool isFourElementShuffle = true;
  for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number.
    unsigned EltNo = 8;                  // Start out undef.
    for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
      if (PermMask[i*4+j] < 0)
        continue;                        // Undef, ignore it.

      unsigned ByteSource = PermMask[i*4+j];
      if ((ByteSource & 3) != j) {
        isFourElementShuffle = false;
        break;
      }

      if (EltNo == 8) {
        EltNo = ByteSource/4;
      } else if (EltNo != ByteSource/4) {
        isFourElementShuffle = false;
        break;
      }
    }
    PFIndexes[i] = EltNo;
  }
  // ...
  if (isFourElementShuffle && !isLittleEndian) {
    // ...
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
    // ...
    unsigned Cost = (PFEntry >> 30);
    // ...
  }
  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
    unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
    // ...
    for (unsigned j = 0; j != BytesPerElement; ++j)
      // ...
  }
  unsigned IntrinsicID =
      cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
  // ...
  switch (IntrinsicID) {
  // ...
  switch (IntrinsicID) {
  // ...
  if (Subtarget.hasVSX()) {
    switch (IntrinsicID) {
    // ...
  switch (IntrinsicID) {
  // ...
  unsigned IntrinsicID =
      cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  // ...
    if (Subtarget.isPPC64())
      // ...
  switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
  // ...
    BitNo = 0; InvertBit = false;
    // ...
    BitNo = 0; InvertBit = true;
    // ...
    BitNo = 2; InvertBit = false;
    // ...
    BitNo = 2; InvertBit = true;
    // ...
  }
  // ...
  int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
  // ...
  switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
  // ...
    assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
    assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
  int VectorIndex = 0;
  if (Subtarget.isLittleEndian())
    // ...
         "Expecting an atomic compare-and-swap here.");
  // ...
  auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
  EVT MemVT = AtomicNode->getMemoryVT();
  if (MemVT.getSizeInBits() >= 32)
    // ...
  unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
  // ...
  for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
    Ops.push_back(AtomicNode->getOperand(i));
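// Sub-word (i8/i16) compare-and-swap lands here: MaskVal keeps only the
// low 8 or 16 bits of the comparand, and the loop above copies the node's
// operands so a replacement atomic node can be built from the adjusted
// operand list.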
9202 "Should only be called for ISD::INSERT_VECTOR_ELT");
9217 unsigned InsertAtElement = C->getZExtValue();
9218 unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
9219 if (Subtarget.isLittleEndian()) {
9220 InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
9234 "Unknown extract_vector_elt type");
9274 unsigned Offset = 4*cast<ConstantSDNode>(N->
getOperand(1))->getZExtValue();
  if (!Subtarget.useCRBits())
    // ...
    SDValue Vals[4], LoadChains[4];
    for (unsigned Idx = 0; Idx < 4; ++Idx) {
      // ...
      if (ScalarVT != ScalarMemVT)
        // ...
                           ScalarMemVT, MinAlign(Alignment, Idx * Stride),
        // ...
        Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr,
        // ...
             "Unknown addressing mode on vector load");
      // ...
      LoadChains[Idx] = Load.getValue(1);
      // ...
                                        BasePtr.getValueType()));
      // ...
    }
    // ...
    SDValue RetOps[] = { Value, TF };
    // ...
    SDValue VectElmts[4], VectElmtChains[4];
    for (unsigned i = 0; i < 4; ++i) {
      // ...
      VectElmtChains[i] = VectElmts[i].getValue(1);
      // ...
    }
    // ...
    SDValue RVals[] = { Value, LoadChain };
    for (unsigned Idx = 0; Idx < 4; ++Idx) {
      // ...
      if (ScalarVT != ScalarMemVT)
        // ...
                            ScalarMemVT, MinAlign(Alignment, Idx * Stride),
        // ...
        Store = DAG.getStore(StoreChain, dl, Ex, BasePtr,
        // ...
             "Unknown addressing mode on vector store");
      // ...
      Stores[Idx] = Store;
    }
    SDValue Loads[4], LoadChains[4];
    for (unsigned i = 0; i < 4; ++i) {
      // ...
      LoadChains[i] = Loads[i].getValue(1);
    }
    // ...
    for (unsigned i = 0; i < 4; ++i) {
      // ...
    }
    // ...
                            LHS, RHS, Zero, DAG, dl);
    bool isLittleEndian = Subtarget.isLittleEndian();
    // ...
    for (unsigned i = 0; i != 8; ++i) {
      if (isLittleEndian) {
        Ops[i*2  ] = 2*i;
        Ops[i*2+1] = 2*i+16;
      } else {
        Ops[i*2  ] = 2*i+1;
        Ops[i*2+1] = 2*i+1+16;
      }
    }
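// Altivec has no byte multiply: the surrounding code forms 16-bit products
// of the even and odd byte lanes (vmuleub/vmuloub) and this mask drives a
// VPERM that collects the low byte of each product back into lane order -
// indices of 16 and up select bytes from VPERM's second input vector.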
9579 "Only set vector abs as custom, scalar abs shouldn't reach here!");
9582 "Unexpected vector element type!");
9584 "Current subtarget doesn't support smax v2i64!");
    return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
  // ...
  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  // ...
  case ISD::MUL:                return LowerMUL(Op, DAG);
  case ISD::ABS:                return LowerABS(Op, DAG);
  // ...
    return LowerINTRINSIC_VOID(Op, DAG);
  // ...
    return LowerREM(Op, DAG);
  // ...
    return LowerBSWAP(Op, DAG);
  // ...
    return LowerATOMIC_CMP_SWAP(Op, DAG);
  // ...
    llvm_unreachable("Do not know how to custom type legalize this operation!");
    if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
        // ...
           "Unexpected result type for CTR decrement intrinsic");
    // ...
    if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
      // ...
  if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
MachineBasicBlock *
PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
                                    unsigned AtomicSize, unsigned BinOpcode,
                                    unsigned CmpOpcode,
                                    unsigned CmpPred) const {
  // Pick the load-reserve/store-conditional pair matching the access width.
  auto LoadMnemonic = PPC::LDARX;
  auto StoreMnemonic = PPC::STDCX;
  switch (AtomicSize) {
  case 1:
    LoadMnemonic = PPC::LBARX;
    StoreMnemonic = PPC::STBCX;
    assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
    break;
  case 2:
    LoadMnemonic = PPC::LHARX;
    StoreMnemonic = PPC::STHCX;
    assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
    break;
  case 4:
    LoadMnemonic = PPC::LWARX;
    StoreMnemonic = PPC::STWCX;
    break;
  case 8:
    LoadMnemonic = PPC::LDARX;
    StoreMnemonic = PPC::STDCX;
    break;
  }
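// [Sketch, not from the original source] The blocks built below implement
// the standard load-reserve/store-conditional read-modify-write loop:
//
//   loopMBB:  l[bhwd]arx  dest, ptrA, ptrB   ; load and reserve
//             <BinOpcode> tmp, incr, dest    ; the bin-op, when present
//             st[bhwd]cx. tmp, ptrA, ptrB    ; store iff reservation held
//             bne-        loopMBB            ; reservation lost -> retry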
unsigned TmpReg = (!BinOpcode) ? incr
                               : RegInfo.createVirtualRegister(
                                     AtomicSize == 8 ? &PPC::G8RCRegClass
                                                     : &PPC::GPRCRegClass);

BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
    .addReg(ptrA).addReg(ptrB);

if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
  // Sign-extend the partword value so a signed word compare is correct.
  BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
          ExtReg).addReg(dest);
  BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
      .addReg(incr).addReg(ExtReg);
} else
  BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
      .addReg(incr).addReg(dest);
MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary(
    MachineInstr &MI, MachineBasicBlock *BB, bool is8bit, unsigned BinOpcode,
    unsigned CmpOpcode, unsigned CmpPred) const {
  // If we support partword atomic mnemonics, just use them.
  if (Subtarget.hasPartwordAtomics())
    return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode,
                            CmpPred);

  bool is64bit = Subtarget.isPPC64();
  bool isLittleEndian = Subtarget.isLittleEndian();
  unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;

      is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;

  if (ptrA != ZeroReg) {
    BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)

  // Lane bit offset: (ptr & 3) * 8, masked to the valid byte/halfword lanes.
  BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
      .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
      .addImm(3)
      .addImm(27)
      .addImm(is8bit ? 28 : 27);
  if (!isLittleEndian)
    BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
        .addReg(Shift1Reg)
        .addImm(is8bit ? 24 : 16);
  if (is64bit)
    BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
  else
    BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)

  // Shift the increment and an all-ones lane mask into position.
  BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg);
  if (is8bit)
    BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
  else {
    BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
    BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
  }
  BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)

  BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)

  BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)

  BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
      .addReg(TmpDestReg)

      .addReg(TmpDestReg)

  unsigned ValueReg = SReg;
  unsigned CmpReg = Incr2Reg;
  if (CmpOpcode == PPC::CMPW) {
    BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)

    BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)

    ValueReg = ValueSReg;

  BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)

      .addReg(TmpDestReg)
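// [Illustrative sketch, not part of the original source] The RLWINM/XORI
// pair above computes the bit offset of the subword lane inside its aligned
// 32-bit container. A standalone model of that arithmetic:
#include <cstdint>
static unsigned subwordLaneShift(uint64_t Ptr, bool Is8Bit,
                                 bool IsLittleEndian) {
  // RLWINM: rotate the pointer left by 3 and keep only the lane bits, i.e.
  // (ptr & 3) * 8 for bytes or (ptr & 2) * 8 for halfwords.
  unsigned Shift = (unsigned)(Ptr & (Is8Bit ? 3 : 2)) * 8;
  if (!IsLittleEndian)
    Shift ^= Is8Bit ? 24 : 16; // XORI: big-endian lanes count from the top
  return Shift;
}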
assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");

assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");

// thisMBB: spill the TOC (64-bit SVR4), the base pointer, and the resume
// label into the setjmp buffer.
if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) {
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))

  BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;

  BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;

MIB = BuildMI(*thisMBB, MI, DL,
              TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))

MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))

// mainMBB: materialize the return label via mflr and store it.
        TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
if (Subtarget.isPPC64()) {
  MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))

  MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))

BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);

// sinkMBB: merge the two results (0 on the direct path, 1 via longjmp).
        TII->get(PPC::PHI), DstReg)
    .addReg(mainDstReg).addMBB(mainMBB)
10252 "Invalid Pointer Size!");
10255 (PVT ==
MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
10258 unsigned FP = (PVT ==
MVT::i64) ? PPC::X31 : PPC::R31;
10259 unsigned SP = (PVT ==
MVT::i64) ? PPC::X1 : PPC::R1;
10292 .addImm(LabelOffset)
10295 MIB =
BuildMI(*MBB, MI, DL, TII->
get(PPC::LWZ), Tmp)
10296 .addImm(LabelOffset)
10307 MIB =
BuildMI(*MBB, MI, DL, TII->
get(PPC::LWZ), SP)
10319 MIB =
BuildMI(*MBB, MI, DL, TII->
get(PPC::LWZ), BP)
10326 if (PVT ==
MVT::i64 && Subtarget.isSVR4ABI()) {
if (MI.getOpcode() == TargetOpcode::STACKMAP ||
    MI.getOpcode() == TargetOpcode::PATCHPOINT) {
  if (Subtarget.isPPC64() && Subtarget.isSVR4ABI() &&
      MI.getOpcode() == TargetOpcode::PATCHPOINT) {

if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
    MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
  return emitEHSjLjSetJmp(MI, BB);
} else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
           MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
  return emitEHSjLjLongJmp(MI, BB);
}

if (MI.getOpcode() == PPC::SELECT_CC_I4 ||

if (MI.getOpcode() == PPC::SELECT_CC_I4 ||

} else if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
           MI.getOpcode() == PPC::SELECT_CC_F16 ||
           MI.getOpcode() == PPC::SELECT_CC_QFRC ||
           MI.getOpcode() == PPC::SELECT_CC_QSRC ||
           MI.getOpcode() == PPC::SELECT_CC_QBRC ||
           MI.getOpcode() == PPC::SELECT_CC_VRRC ||
           MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
           MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
           MI.getOpcode() == PPC::SELECT_CC_VSRC ||
           MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
           MI.getOpcode() == PPC::SELECT_CC_SPE ||

  // Build the diamond control flow for the select.
  F->insert(It, copy0MBB);

      .addImm(SelectPred)
}
else if (MI.getOpcode() == PPC::ReadTB) {
  // Read the 64-bit timebase as two halves, re-reading the upper half to
  // detect a low-half wraparound between the two reads.
  BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
  BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
  BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);

  BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
}
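// [Illustrative sketch, not part of the original source] The mfspr/cmpw
// sequence above is the classic 32-bit timebase read; modeled here with a
// hypothetical readSPR() accessor:
#include <cstdint>
static uint64_t readTimeBase(uint32_t (*readSPR)(unsigned)) {
  for (;;) {
    uint32_t Hi = readSPR(269);      // TBU (SPR 269)
    uint32_t Lo = readSPR(268);      // TBL (SPR 268)
    if (readSPR(269) == Hi)          // the cmpw/branch above
      return ((uint64_t)Hi << 32) | Lo;
    // TBL wrapped between the reads; retry.
  }
}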
// Dispatch the atomic pseudo-instructions to EmitAtomicBinary /
// EmitPartwordAtomicBinary with the matching machine opcode (the call
// sites are elided in this excerpt).
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)

else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)

else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)

else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)

else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)

else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)

else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)

else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)

else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)

else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)

else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)

else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
         MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
         (Subtarget.hasPartwordAtomics() &&
          MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
         (Subtarget.hasPartwordAtomics() &&
          MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
  bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
  auto LoadMnemonic = PPC::LDARX;
  auto StoreMnemonic = PPC::STDCX;
  switch (MI.getOpcode()) {
  case PPC::ATOMIC_CMP_SWAP_I8:
    LoadMnemonic = PPC::LBARX;
    StoreMnemonic = PPC::STBCX;
    assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
    break;
  case PPC::ATOMIC_CMP_SWAP_I16:
    LoadMnemonic = PPC::LHARX;
    StoreMnemonic = PPC::STHCX;
    assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
    break;
  case PPC::ATOMIC_CMP_SWAP_I32:
    LoadMnemonic = PPC::LWARX;
    StoreMnemonic = PPC::STWCX;
    break;
  case PPC::ATOMIC_CMP_SWAP_I64:
    LoadMnemonic = PPC::LDARX;
    StoreMnemonic = PPC::STDCX;
    break;
  }

  F->insert(It, loop1MBB);
  F->insert(It, loop2MBB);

  BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
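// [Sketch, not from the original source] loop1MBB/loop2MBB form the usual
// two-block compare-and-swap structure:
//
//   loop1:  l[bhwd]arx  dest, ptrA, ptrB    ; load and reserve
//           cmp[wd]     dest, oldval        ; the CMPW/CMPD built above
//           bne-        exit                ; value differs: fail
//   loop2:  st[bhwd]cx. newval, ptrA, ptrB  ; store iff reservation held
//           bne-        loop1               ; reservation lost: retry
//   exit: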
} else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
           MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
  // Expand subword compare-and-swap in software on an aligned word.
  bool is64bit = Subtarget.isPPC64();
  bool isLittleEndian = Subtarget.isLittleEndian();
  bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;

  F->insert(It, loop1MBB);
  F->insert(It, loop2MBB);

      is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;

  unsigned ShiftReg =
      isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);

  unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;

  if (ptrA != ZeroReg) {
    BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)

  BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
      .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
      .addImm(3)
      .addImm(27)
      .addImm(is8bit ? 28 : 27);
  if (!isLittleEndian)
    BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
        .addReg(Shift1Reg)
        .addImm(is8bit ? 24 : 16);
  if (is64bit)
    BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
  else
    BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)

  // Shift the new and expected values into the lane position.
  BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)

  BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)

  if (is8bit)
    BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
  else {
    BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
    BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
  }
  BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)

      .addReg(NewVal2Reg)

      .addReg(OldVal2Reg)

  // loop1: load-reserve the word and compare the masked lane.
  BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)

      .addReg(TmpDestReg)

  BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)

  // loop2: splice the new lane into the word and store-conditional.
  BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
      .addReg(TmpDestReg)

      .addReg(TmpDestReg)
} else if (MI.getOpcode() == PPC::FADDrtz) {
  // Set the FPSCR rounding mode to round-toward-zero around the add
  // (RN field, bits 30-31).
  BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31);
  BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30);

} else if (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT ||
           MI.getOpcode() == PPC::ANDIo_1_GT_BIT ||
           MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
           MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) {
  unsigned Opcode = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
                     MI.getOpcode() == PPC::ANDIo_1_GT_BIT8)
                        ? PPC::ANDIo8
                        : PPC::ANDIo;
  bool isEQ = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT ||
               MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8);

      Opcode == PPC::ANDIo ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);

  BuildMI(*BB, MI, dl, TII->get(Opcode), Dest)

  BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY),

      .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT);
} else if (MI.getOpcode() == PPC::TCHECK_RET) {
  BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
// Hardware with "precise" reciprocal estimates needs a single refinement
// step; older estimate instructions need three.
int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
if (VT.getScalarType() == MVT::f64)
  RefinementSteps++;
return RefinementSteps;

SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
                                           int Enabled, int &RefinementSteps,
                                           bool &UseOneConstNR,
                                           bool Reciprocal) const {
  EVT VT = Operand.getValueType();
  if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
      (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||

    if (RefinementSteps == ReciprocalEstimate::Unspecified)

    UseOneConstNR = true;

SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
                                            int Enabled,
                                            int &RefinementSteps) const {
  EVT VT = Operand.getValueType();
  if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
      (VT == MVT::f64 && Subtarget.hasFRE()) ||
      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||

    if (RefinementSteps == ReciprocalEstimate::Unspecified)
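// [Illustrative sketch, not part of the original source] Each refinement
// step is one Newton-Raphson iteration, which roughly doubles the number
// of correct bits in the estimate; for rsqrt the iteration is
// x' = x * (1.5 - 0.5 * a * x * x):
static double refineRsqrt(double Est, double A, int Steps) {
  for (int i = 0; i < Steps; ++i)
    Est = Est * (1.5 - 0.5 * A * Est * Est);
  return Est;
}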
unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
  switch (Subtarget.getDarwinDirective()) {

Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();

// Return true if Loc is a Bytes-sized access Dist elements away from the
// access described by Base.
static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
                               unsigned Bytes, int Dist, SelectionDAG &DAG) {
  int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
  int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();

  if (FS != BFS || FS != (int)Bytes)
    return false;

  SDValue Base1 = Loc, Base2 = BaseLoc;
  int64_t Offset1 = 0, Offset2 = 0;

  if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))

  if (isGA1 && isGA2 && GV1 == GV2)
    return Offset1 == (Offset2 + Dist * Bytes);

static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
                            unsigned Bytes, int Dist, SelectionDAG &DAG) {
  EVT VT = LS->getMemoryVT();

  switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
  default:
    return false;

  switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
  default:
    return false;
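// [Illustrative sketch, not part of the original source] The core test in
// isConsecutiveLSLoc: two accesses of Bytes bytes each are Dist elements
// apart exactly when their offsets from a shared base differ by Dist*Bytes.
#include <cstdint>
static bool offsetsAreConsecutive(int64_t Offset1, int64_t Offset2,
                                  unsigned Bytes, int Dist) {
  return Offset1 == Offset2 + (int64_t)Dist * (int64_t)Bytes;
}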
while (!Queue.empty()) {
  SDNode *ChainNext = Queue.pop_back_val();
  if (!Visited.insert(ChainNext).second)
    continue;

  if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {

    if (!Visited.count(ChainLD->getChain().getNode()))
      Queue.push_back(ChainLD->getChain().getNode());

    for (const SDUse &O : ChainNext->ops())
      if (!Visited.count(O.getNode()))
        Queue.push_back(O.getNode());

  LoadRoots.insert(ChainNext);

Queue.push_back(*I);

while (!Queue.empty()) {
  SDNode *LoadRoot = Queue.pop_back_val();
  if (!Visited.insert(LoadRoot).second)
    continue;

  if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))

  for (SDNode::use_iterator UI = LoadRoot->use_begin(),
       UE = LoadRoot->use_end(); UI != UE; ++UI)
    if (((isa<MemSDNode>(*UI) &&
          cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||

      Queue.push_back(*UI);
auto Final = Shifted;

SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  if (!DCI.isAfterLegalizeDAG())

  for (SDNode::use_iterator UI = N->use_begin(),
       UE = N->use_end(); UI != UE; ++UI) {

  unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();

  if (OpSize < Size) {

SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");

  if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One)
for (unsigned i = 0; i < 2; ++i) {

while (!BinOps.empty()) {

  for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {

for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
  if (isa<ConstantSDNode>(Inputs[i]))

    UE = Inputs[i].getNode()->use_end();

    if (User != N && !Visited.count(User))

for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {

    UE = PromOps[i].getNode()->use_end();

    if (User != N && !Visited.count(User))

for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {

  if (isa<ConstantSDNode>(Inputs[i]))

std::list<HandleSDNode> PromOpHandles;
for (auto &PromOp : PromOps)
  PromOpHandles.emplace_back(PromOp);

while (!PromOpHandles.empty()) {

  PromOpHandles.pop_back();

  if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&

    PromOpHandles.emplace_front(PromOp);

  if (isa<ConstantSDNode>(RepValue))

  default: C = 0;
    break;

  if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&

      (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&

    PromOpHandles.emplace_front(PromOp);

  for (unsigned i = 0; i < 2; ++i)
    if (isa<ConstantSDNode>(Ops[C+i]))
SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {

  while (!BinOps.empty()) {
    SDValue BinOp = BinOps.back();

    PromOps.push_back(BinOp);

    for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {

  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    if (isa<ConstantSDNode>(Inputs[i]))

      UE = Inputs[i].getNode()->use_end();

      if (User != N && !Visited.count(User))

        SelectTruncOp[0].insert(std::make_pair(User,

        SelectTruncOp[0].insert(std::make_pair(User,

        SelectTruncOp[1].insert(std::make_pair(User,

  for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {

      UE = PromOps[i].getNode()->use_end();

      if (User != N && !Visited.count(User))

        SelectTruncOp[0].insert(std::make_pair(User,

        SelectTruncOp[0].insert(std::make_pair(User,

        SelectTruncOp[1].insert(std::make_pair(User,

  bool ReallyNeedsExt = false;

  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    if (isa<ConstantSDNode>(Inputs[i]))

        Inputs[i].getOperand(0).getValueSizeInBits();
    assert(PromBits < OpBits && "Truncation not to a smaller bit count?");

          OpBits - PromBits))) ||

          (OpBits - (PromBits - 1)))) {
      ReallyNeedsExt = true;
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {

    if (isa<ConstantSDNode>(Inputs[i]))

    SDValue InSrc = Inputs[i].getOperand(0);

  std::list<HandleSDNode> PromOpHandles;
  for (auto &PromOp : PromOps)
    PromOpHandles.emplace_back(PromOp);

  while (!PromOpHandles.empty()) {

    PromOpHandles.pop_back();

    default: C = 0;
      break;

    if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&

        (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&

      PromOpHandles.emplace_front(PromOp);

      PromOpHandles.emplace_front(PromOp);

    for (unsigned i = 0; i < 2; ++i) {
      if (!isa<ConstantSDNode>(Ops[C+i]))

    auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
    if (SI0 != SelectTruncOp[0].end())

    auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
    if (SI1 != SelectTruncOp[1].end())

  if (!ReallyNeedsExt)

         "Invalid extension type");
SDValue PPCTargetLowering::combineSetCC(SDNode *N,
                                        DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::SETCC &&
         "Should be called with a SETCC node");

  return DAGCombineTruncBoolExt(N, DCI);

SDValue PPCTargetLowering::combineElementTruncationToVectorTruncation(
    SDNode *N, DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         "Should be called with a BUILD_VECTOR node");

         "The input operand must be an fp-to-int conversion.");

  bool IsSplat = true;

  if (NextConversion != FirstConversion)

      DAG.getIntPtrConstant(1, dl));

  SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
  return DAG.getNode(Opcode, dl, TargetVT, BV);
12062 "Should be called with a BUILD_VECTOR node");
12065 bool InputsAreConsecutiveLoads =
true;
12066 bool InputsAreReverseConsecutive =
true;
12069 bool IsRoundOfExtLoad =
false;
12097 if (IsRoundOfExtLoad && LD2->getExtensionType() !=
ISD::EXTLOAD)
12101 InputsAreConsecutiveLoads =
false;
12103 InputsAreReverseConsecutive =
false;
12106 if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
12110 assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
12111 "The loads cannot be both consecutive and reverse consecutive.");
12114 IsRoundOfExtLoad ? FirstInput.
getOperand(0) : FirstInput;
12121 if (InputsAreConsecutiveLoads) {
12122 assert(LD1 &&
"Input needs to be a LoadSDNode.");
12127 if (InputsAreReverseConsecutive) {
12128 assert(LDL &&
"Input needs to be a LoadSDNode.");
12130 LDL->getBasePtr(), LDL->getPointerInfo(),
12131 LDL->getAlignment());
                                      SDValue Input, uint64_t Elems,
                                      uint64_t CorrectElems) {

  // Transfer one nibble of the lane index per destination byte.
  ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;

  ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;

  CorrectElems = CorrectElems >> 8;
  Elems = Elems >> 8;

    DAG.getUNDEF(Input.getValueType()), ShuffleMask);

uint64_t TargetElems[] = {

uint64_t Elems = 0;

auto isSExtOfVecExtract = [&](SDValue Op) -> bool {

  if (Input && Input != Extract.getOperand(0))

  Elems = Elems << 8;

if (!isSExtOfVecExtract(N->getOperand(i))) {

int TgtElemArrayIdx;

// InputSize + OutputSize uniquely identifies each supported widening.
if (InputSize + OutputSize == 40)
  TgtElemArrayIdx = 0;
else if (InputSize + OutputSize == 72)
  TgtElemArrayIdx = 1;
else if (InputSize + OutputSize == 48)
  TgtElemArrayIdx = 2;
else if (InputSize + OutputSize == 80)
  TgtElemArrayIdx = 3;
else if (InputSize + OutputSize == 96)
  TgtElemArrayIdx = 4;

uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];

    ? CorrectElems & 0x0F0F0F0F0F0F0F0F
    : CorrectElems & 0xF0F0F0F0F0F0F0F0;
if (Elems != CorrectElems) {
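// [Illustrative sketch, not part of the original source] Elems packs one
// extract-lane index per byte, so an entire BUILD_VECTOR's extraction
// pattern can be compared against the expected TargetElems pattern with a
// single integer comparison. Standalone model of the packing:
#include <cstdint>
static uint64_t packLaneIndices(const unsigned *Lanes, unsigned NumLanes) {
  uint64_t Elems = 0;
  for (unsigned i = 0; i != NumLanes; ++i)
    Elems = (Elems << 8) | (Lanes[i] & 0xFF); // mirrors "Elems = Elems << 8"
  return Elems;
}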
SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         "Should be called with a BUILD_VECTOR node");

  if (!Subtarget.hasVSX())

  SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);

  if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {

  if (!Ext1Op || !Ext2Op)

  int SecondElem = Ext2Op->getZExtValue();

  // Adjacent extracts map onto one half of the source vector; which half
  // depends on endianness.
  if (FirstElem == 0 && SecondElem == 1)
    SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
  else if (FirstElem == 2 && SecondElem == 3)
    SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;

      SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));

SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::SINT_TO_FP ||
          N->getOpcode() == ISD::UINT_TO_FP) &&
         "Need an int -> FP conversion node here");

  bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
                     (FirstOperand.getValueType() == MVT::i8 ||
                      FirstOperand.getValueType() == MVT::i16);
  if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {

    unsigned ConvOp = Signed ?

    LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());

    SDValue ExtOps[] = { Ld, WidthConst };

  assert((N->getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
         "UINT_TO_FP is supported only with FPCVT");

      Subtarget.hasFPCVT()) ||

  DCI.AddToWorklist(Src.getNode());

  DCI.AddToWorklist(FP.getNode());
// Little-endian VSX vector accesses may need an explicit element swap
// (see expandVSXLoadForLE / expandVSXStoreForLE).
if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment() % 16)

SDValue LoadOps[] = { Chain, Base };

if (VecTy != MVT::v2f64) {

if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment() % 16)

SDValue StoreOps[] = { Chain, Swap, Base };

    StoreOps, VecTy, MMO);

assert((N->getOpcode() == ISD::FP_TO_SINT ||
        N->getOpcode() == ISD::FP_TO_UINT) &&
       "Not a FP_TO_INT Instruction!");

bool ValidTypeForStoreFltAsInt =
    (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
     (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));

if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Altivec() ||
    cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)

Val = DAG.getNode(ConvOpcode,

    cast<StoreSDNode>(N)->getMemoryVT(),
    cast<StoreSDNode>(N)->getMemOperand());

case ISD::ADD:
  return combineADD(N, DCI);
case ISD::SHL:
  return combineSHL(N, DCI);
case ISD::SRA:
  return combineSRA(N, DCI);
case ISD::SRL:
  return combineSRL(N, DCI);
  if (C->isNullValue() ||   //  0 >>s V -> 0.
      C->isAllOnesValue())  // -1 >>s V -> -1.

case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::ANY_EXTEND:
  return DAGCombineExtBoolTrunc(N, DCI);
case ISD::TRUNCATE:
  return combineTRUNCATE(N, DCI);
case ISD::SETCC:
  if (SDValue CSCC = combineSetCC(N, DCI))

case ISD::SELECT_CC:
  return DAGCombineTruncBoolExt(N, DCI);
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP:
  return combineFPToIntToFP(N, DCI);
case ISD::STORE: {

  SDValue Val = combineStoreFPToInt(N, DCI);

  // Turn STORE (BSWAP) -> sthbrx/stwbrx (and stdbrx on 64-bit with LDBRX).
  if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&

      (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {

    EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();

    if (Op1VT.bitsGT(mVT)) {

        Ops, cast<StoreSDNode>(N)->getMemoryVT(),
        cast<StoreSDNode>(N)->getMemOperand());

  EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();

    cast<StoreSDNode>(N)->setTruncatingStore(true);

  if (Subtarget.needsSwapsForVSXMemOps() &&

case ISD::LOAD: {
  if (VT.isSimple()) {
    MVT LoadVT = VT.getSimpleVT();
    if (Subtarget.needsSwapsForVSXMemOps() &&

  // Recognize an i64 load used only through a truncate and a
  // right-shift-by-32 and replace it with two narrower loads.
  auto ReplaceTwoFloatLoad = [&]() {

    while (UI.getUse().getResNo() != 0) ++UI;

    while (UI.getUse().getResNo() != 0) ++UI;
    SDNode *RightShift = *UI;

    if (!isa<ConstantSDNode>(RightShift->getOperand(1)) ||

    if (Subtarget.isLittleEndian())

           "Non-pre-inc AM on PPC?");

  if (ReplaceTwoFloatLoad())
bool isLittleEndian = Subtarget.isLittleEndian();

// Expand an unaligned vector load as two aligned loads joined by a permute
// whose control vector comes from lvsl/lvsr.
MVT PermCntlTy, PermTy, LDTy;
if (Subtarget.hasAltivec()) {

SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };

    BaseLoadOps, LDTy, BaseMMO);

int IncOffset = VT.getSizeInBits() / 8;
int IncValue = IncOffset;

SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };

    ExtraLoadOps, LDTy, ExtraMMO);

if (isLittleEndian)
  // On little-endian the two loads are permuted in the opposite order.
      ExtraLoad, BaseLoad, PermCntl, DAG, dl);

      BaseLoad, ExtraLoad, PermCntl, DAG, dl);

Perm = Subtarget.hasAltivec() ?
bool isLittleEndian = Subtarget.isLittleEndian();
unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();

if ((IID == Intr ||

    cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) {

if (isa<ConstantSDNode>(Add->getOperand(1))) {

  for (SDNode::use_iterator UI = BasePtr->use_begin(),
       UE = BasePtr->use_end(); UI != UE; ++UI) {
    if (UI->getOpcode() == ISD::ADD &&
        isa<ConstantSDNode>(UI->getOperand(1)) &&
        (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
         cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
            (1ULL << Bits) == 0) {

      cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) {
if (Subtarget.needsSwapsForVSXMemOps()) {
  switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {

if (Subtarget.needsSwapsForVSXMemOps()) {
  switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {

    (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&

// If a VCMPo node already exists with the same operands as this VCMP node,
// use its result instead (VCMPo produces both the value and CR6).
SDNode *VCMPoNode = nullptr;

SDNode *FlagUser = nullptr;
for (SDNode::use_iterator UI = VCMPoNode->use_begin();
     FlagUser == nullptr; ++UI) {

  for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
    if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {

return SDValue(VCMPoNode, 0);
if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
    cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
        Intrinsic::ppc_is_decremented_ctr_nonzero) {
  assert(Cond.getNode()->hasOneUse() &&
         "Counter decrement has more than one use");

if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
    cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
        Intrinsic::ppc_is_decremented_ctr_nonzero &&
    isa<ConstantSDNode>(RHS)) {
  assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
         "Counter decrement comparison is not EQ or NE");

  unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();

  assert(LHS.getNode()->hasOneUse() &&
         "Counter decrement has more than one use");

assert(isDot && "Can't compare against a vector result!");

unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
if (Val != 0 && Val != 1) {

bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);

switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
case ISD::BUILD_VECTOR:
  return DAGCombineBuildVector(N, DCI);
case ISD::ABS:
  return combineABS(N, DCI);
case ISD::VSELECT:
  return combineVSelect(N, DCI);

// BuildSDIVPow2: custom lowering of signed division by a power of two.
if (VT == MVT::i64 && !Subtarget.isPPC64())

    !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))

bool IsNegPow2 = (-Divisor).isPowerOf2();

void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      KnownBits &Known,
                                                      const APInt &DemandedElts,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {

  Known.Zero = 0xFFFF0000;

  switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
switch (Subtarget.getDarwinDirective()) {

uint64_t LoopSize = 0;
for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
  for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {

// Loops of 17-32 bytes get 32-byte alignment so they fit in one fetch group.
if (LoopSize > 16 && LoopSize <= 32)

if (Constraint.size() == 1) {
  switch (Constraint[0]) {

} else if (Constraint == "wc") {

} else if (Constraint == "wa" || Constraint == "wd" ||
           Constraint == "wf" || Constraint == "ws" ||
           Constraint == "wi") {

if (!CallOperandVal)

else if ((StringRef(constraint) == "wa" ||

switch (*constraint) {
std::pair<unsigned, const TargetRegisterClass *>
PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
    case 'b':   // R1-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
      return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
    case 'r':   // R0-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RCRegClass);
      return std::make_pair(0U, &PPC::GPRCRegClass);
    case 'f':
      if (Subtarget.hasSPE()) {
        if (VT == MVT::f32 || VT == MVT::i32)
          return std::make_pair(0U, &PPC::SPE4RCRegClass);
        if (VT == MVT::f64 || VT == MVT::i64)
          return std::make_pair(0U, &PPC::SPERCRegClass);
      } else {
        if (VT == MVT::f32 || VT == MVT::i32)
          return std::make_pair(0U, &PPC::F4RCRegClass);
        if (VT == MVT::f64 || VT == MVT::i64)
          return std::make_pair(0U, &PPC::F8RCRegClass);
        if (VT == MVT::v4f64 && Subtarget.hasQPX())
          return std::make_pair(0U, &PPC::QFRCRegClass);
        if (VT == MVT::v4f32 && Subtarget.hasQPX())
          return std::make_pair(0U, &PPC::QSRCRegClass);
      }
      break;
    case 'v':
      if (VT == MVT::v4f64 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QFRCRegClass);
      if (VT == MVT::v4f32 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QSRCRegClass);
      if (Subtarget.hasAltivec())
        return std::make_pair(0U, &PPC::VRRCRegClass);
      break;
    case 'y':   // crrc
      return std::make_pair(0U, &PPC::CRRCRegClass);
    }
  } else if (Constraint == "wc" && Subtarget.useCRBits()) {
    return std::make_pair(0U, &PPC::CRBITRCRegClass);
  } else if ((Constraint == "wa" || Constraint == "wd" ||
              Constraint == "wf" || Constraint == "wi") &&
             Subtarget.hasVSX()) {
    return std::make_pair(0U, &PPC::VSRCRegClass);
  } else if (Constraint == "ws" && Subtarget.hasVSX()) {
    if (VT == MVT::f32 && Subtarget.hasP8Vector())
      return std::make_pair(0U, &PPC::VSSRCRegClass);
    return std::make_pair(0U, &PPC::VSFRCRegClass);
  }

  std::pair<unsigned, const TargetRegisterClass *> R =

  // An "r" constraint names a 32-bit GPR; promote to the matching 64-bit
  // register when an i64 value is required on PPC64.
  if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
      PPC::GPRCRegClass.contains(R.first))
    return std::make_pair(TRI->getMatchingSuperReg(R.first,
                              PPC::sub_32, &PPC::G8RCRegClass),
                          &PPC::G8RCRegClass);

    R.first = PPC::CR0;
    R.second = &PPC::CRRCRegClass;
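// [Illustrative usage sketch, not part of the original source] These
// register classes surface through GCC-style inline asm constraints; "b"
// requires a base register (excludes r0, matching GPRC_NOR0 above) while
// "r" allows any GPR:
//
//   int sum;
//   __asm__("add %0,%1,%2" : "=r"(sum) : "b"(lhs), "r"(rhs));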
void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  // Only length-1 constraints are handled here.
  if (Constraint.length() > 1)
    return;

  char Letter = Constraint[0];

  if (isShiftedUInt<16, 16>(Value))

  if (isShiftedInt<16, 16>(Value))

  Ops.push_back(Result);

switch (AM.Scale) {

unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
bool isPPC64 = Subtarget.isPPC64();

SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);

    DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,

SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);

unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  FrameReg = isPPC64 ? PPC::X1 : PPC::R1;

  FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;

bool isPPC64 = Subtarget.isPPC64();
bool isDarwinABI = Subtarget.isDarwinABI();

unsigned Reg = StringSwitch<unsigned>(RegName)
    .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
    .Case("r2", (isDarwinABI || isPPC64) ? 0 : PPC::R2)
    .Case("r13", (!isPPC64 && isDarwinABI) ? 0 :
                     (is64Bit ? PPC::X13 : PPC::R13))

if (Subtarget.isSVR4ABI() && !Subtarget.isPPC64())

if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))

unsigned char GVFlags = Subtarget.classifyGlobalReference(GV);

bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           MachineFunction &MF,
                                           unsigned Intrinsic) const {
  // Classify the Altivec/VSX/QPX load and store intrinsics (the individual
  // cases are elided in this excerpt).
  switch (Intrinsic) {

  switch (Intrinsic) {

  switch (Intrinsic) {

  switch (Intrinsic) {

  switch (Intrinsic) {

EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
                                           unsigned SrcAlign, bool IsMemset,
                                           bool ZeroMemset, bool MemcpyStrSrc,
                                           MachineFunction &MF) const {
  if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
      (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&

  if (Subtarget.hasAltivec() && Size >= 16 &&
      (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) ||
       ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))

  if (Subtarget.isPPC64()) {

return !(BitSize == 0 || BitSize > 64);
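// [Illustrative sketch, not part of the original source] A simplified model
// of the memop-type choice above: prefer 32-byte QPX vectors, then 16-byte
// Altivec/VSX vectors, then i64 on 64-bit subtargets (the real code also
// weighs memset, zeroing, and unaligned-access subtleties):
static unsigned optimalCopyWidthBytes(bool HasQPX, bool HasAltivec,
                                      bool IsPPC64, unsigned long long Size,
                                      unsigned Align) {
  if (HasQPX && Size >= 32 && Align >= 32)
    return 32; // v4f64
  if (HasAltivec && Size >= 16 && Align >= 16)
    return 16; // v4i32
  return IsPPC64 ? 8 : 4; // i64 or i32
}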
return NumBits1 == 64 && NumBits2 == 32;

return NumBits1 == 64 && NumBits2 == 32;

EVT MemVT = LD->getMemoryVT();

    (Subtarget.isPPC64() && MemVT == MVT::i32)) &&

       "invalid fpext types");

bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                       unsigned AddrSpace,
                                                       unsigned Align,
                                                       bool *Fast) const {

if (Subtarget.hasVSX()) {

static const MCPhysReg ScratchRegs[] = {
  PPC::X12, PPC::LR8, PPC::CTR8, 0
};
return ScratchRegs;

unsigned PPCTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
}

unsigned PPCTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
}
bool PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
    EVT VT, unsigned DefinedValues) const {
  if (VT == MVT::v2i64)
    return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves

  if (Subtarget.hasVSX() || Subtarget.hasQPX())

// Split-CSR copies are only inserted on 64-bit non-Darwin targets.
if (Subtarget.isDarwinABI())
  return;
if (!Subtarget.isPPC64())
  return;

  RC = &PPC::G8RCRegClass;
else if (PPC::F8RCRegClass.contains(*I))
  RC = &PPC::F8RCRegClass;
else if (PPC::CRRCRegClass.contains(*I))
  RC = &PPC::CRRCRegClass;
else if (PPC::VRRCRegClass.contains(*I))
  RC = &PPC::VRRCRegClass;

assert(Entry->getParent()->getFunction().hasFnAttribute(
           Attribute::NoUnwind) &&
       "Function should be nounwind in insertCopiesSplitCSR!");

for (auto *Exit : Exits)
  BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
          TII->get(TargetOpcode::COPY), *I)
if (!Subtarget.isTargetLinux())

if (!Subtarget.isTargetLinux())

if (!VT.isSimple() || !Subtarget.hasVSX())

unsigned TargetOpcode;

if (Mask->getZExtValue() == OpSizeInBits - 1)

SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {

  if (!Subtarget.isISA3_0() ||

SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {

SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {

// Detect a zext of an i1 setcc against a constant, the key pattern for the
// add-with-carry (addze) combine below.
auto isZextOfCompareWithConstant = [](SDValue Op) {

  int64_t NegConstant = 0 - Constant->getSExtValue();

bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
bool RHSHasPattern = isZextOfCompareWithConstant(RHS);

if (LHSHasPattern && !RHSHasPattern)

else if (!LHSHasPattern && !RHSHasPattern)

int64_t NegConstant = 0 - Constant->getSExtValue();

switch (cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
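// [Illustrative sketch, not part of the original source] The addze combine
// rests on a carry identity: for V = Y - C, adding all-ones to V carries
// out exactly when V != 0, so zext(setcc) can be materialized with carry
// arithmetic instead of a separate compare. Checkable model (uses the
// GCC/Clang __uint128_t extension):
#include <cstdint>
static unsigned zextOfNotEqual(uint64_t Y, uint64_t C) {
  uint64_t V = Y - C;                            // addi  y, -c
  __uint128_t Sum = (__uint128_t)V + UINT64_MAX; // addic v, -1
  return (unsigned)(Sum >> 64);                  // carry == (v != 0)
}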
SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {

SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
  if (Subtarget.useCRBits()) {
    if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
      return CRTruncValue;

  int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;

    EltToExtract = EltToExtract ? 0 : 1;

  return DCI.DAG.getNode(

      DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));

bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  if (!Subtarget.isSVR4ABI() || !Subtarget.isPPC64())

  // Respect the "disable-tail-calls" function attribute.
  if (Attr.getValueAsString() == "true")

  if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)

  if (!Callee || Callee->isVarArg())
bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  if (!Subtarget.hasVSX())

  if (Subtarget.hasP9Vector() && VT == MVT::f128)

bool PPCTargetLowering::isMaskAndCmp0FoldingBeneficial(
    const Instruction &AndI) const {
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
    if (CI->getBitWidth() > 64)

    int64_t ConstVal = CI->getZExtValue();
    // Beneficial iff the mask fits one record-form AND immediate:
    // andi. for the low halfword, andis. for the high halfword.
    return isUInt<16>(ConstVal) ||
           (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
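// [Illustrative sketch, not part of the original source] The predicate
// above in isolation: a mask is cheap when it encodes as a single
// record-form AND immediate, either andi. (low 16 bits) or andis. (high
// 16 bits with a clear low half):
#include <cstdint>
static bool fitsRecordFormAndImmediate(uint64_t C) {
  bool Andi  = C <= 0xFFFFu;                                // andi.
  bool Andis = (C & 0xFFFFu) == 0 && (C >> 16) <= 0xFFFFu;  // andis.
  return Andi || Andis;
}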
SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
  assert(Subtarget.hasP9Altivec() &&
         "Only combine this when P9 altivec supported!");

SDValue PPCTargetLowering::combineVSelect(SDNode *N,
                                          DAGCombinerInfo &DCI) const {
  assert(Subtarget.hasP9Altivec() &&
         "Only combine this when P9 altivec supported!");

      CmpOpnd1, CmpOpnd2,
      DAG.getTargetConstant(0, dl, MVT::i32));
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
PPCTargetLowering(const PPCTargetMachine &TM, const PPCSubtarget &STI)
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
G8RC = ADDI_TLSLD_L_ADDR G8RReg, Symbol, Symbol - Op that combines ADDI_TLSLD_L and GET_TLSLD_ADDR un...
cl::opt< bool > ANDIGlueBug
x3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS model, produces an ADDI8 instruction tha...
void setFrameAddressIsTaken(bool T)
virtual bool isJumpTableRelative() const
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
User::const_op_iterator arg_iterator
The type of iterator to use when looping over actual arguments at this call site. ...
StringRef getSection() const
Get the custom section of this global if it has one.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set, or Regs.size() if they are all allocated.
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
SDValue expandVSXLoadForLE(SDNode *N, DAGCombinerInfo &DCI) const
SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode *> &Created) const override
Targets may override this function to provide custom SDIV lowering for power-of-2 denominators...
A parsed version of the target data layout string in and methods for querying it. ...
const_iterator end(StringRef path)
Get end iterator over path.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
EVT getValueType() const
Return the ValueType of the referenced return value.
static unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain, SDValue CallSeqStart, const SDLoc &dl, int SPDiff, bool isTailCall, bool isPatchPoint, bool hasNest, SmallVectorImpl< std::pair< unsigned, SDValue >> &RegsToPass, SmallVectorImpl< SDValue > &Ops, std::vector< EVT > &NodeTys, ImmutableCallSite CS, const PPCSubtarget &Subtarget)
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
FastISel * createFastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo)
static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT, SelectionDAG &DAG, const SDLoc &dl)
BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified amount.
FormattedString left_justify(StringRef Str, unsigned Width)
left_justify - append spaces after string so total output is Width characters.
Return with a flag operand, matched by 'blr'.
Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions for unsigned integers with round ...
SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG)
get_VSPLTI_elt - If this is a build_vector of constants which can be formed by using a vspltis[bhw] i...
static Instruction * callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id)
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
const SDValue & getOffset() const
GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a byte-swapping load instruction.
void setVarArgsNumGPR(unsigned Num)
raw_ostream & errs()
This returns a reference to a raw_ostream for standard error.
C - The default llvm calling convention, compatible with C.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand...
const GlobalValue * getGlobal() const
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant, which is required to be operand #1) half of the integer or float value specified as operand #0.
unsigned getRegisterByName(const char *RegName, EVT VT, SelectionDAG &DAG) const override
Return the register ID of the name passed in.
uint64_t getZExtValue() const
Get zero extended value.
bool isAccessedAsGotIndirect(SDValue N) const
unsigned MaxLoadsPerMemcmpOptSize
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const override
getByValTypeAlignment - Return the desired alignment for ByVal aggregate function arguments in the ca...
static APInt getAllOnesValue(unsigned numBits)
Get the all-ones value.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
This class represents an incoming formal argument to a Function.
static cl::opt< bool > DisableSCO("disable-ppc-sco", cl::desc("disable sibling call optimization on ppc"), cl::Hidden)
QVFPERM = This corresponds to the QPX qvfperm instruction.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
unsigned arg_size() const
virtual void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, unsigned DstReg, ArrayRef< MachineOperand > Cond, unsigned TrueReg, unsigned FalseReg) const
Insert a select instruction into MBB before I that will copy TrueReg to DstReg when Cond is true...
bool isIndexed() const
Return true if this is a pre/post inc/dec load/store.
SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd)...
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
static cl::opt< bool > DisablePPCUnaligned("disable-ppc-unaligned", cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden)
Atomic ordering constants.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
SDValue getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector...
int getFramePointerSaveIndex() const
static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, SelectionDAG &DAG)
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR (an vector value) starting with the ...
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
BR_CC - Conditional branch.
This class represents lattice values for constants.
unsigned getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not...
GPRC = address of GLOBAL_OFFSET_TABLE.
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS model, produces an ADDI8 instruction ...
LLVM_NODISCARD bool equals_lower(StringRef RHS) const
equals_lower - Check for string equality, ignoring case.
int getVarArgsStackOffset() const
A Module instance is used to store all the information related to an LLVM module. ...
bool isPPC64() const
isPPC64 - Return true if we are generating code for 64-bit pointer mode.
bool CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
bool CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
int getSplatIndex() const
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
MachineBasicBlock * EmitPartwordAtomicBinary(MachineInstr &MI, MachineBasicBlock *MBB, bool is8bit, unsigned Opcode, unsigned CmpOpcode=0, unsigned CmpPred=0) const
bool isVector() const
Return true if this is a vector value type.
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
class llvm::RegisterBankInfo GPR
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
const SDValue & getBasePtr() const
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
static void LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg, SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64, bool isTailCall, bool isVector, SmallVectorImpl< SDValue > &MemOpChains, SmallVectorImpl< TailCallArgumentInfo > &TailCallArguments, const SDLoc &dl)
LowerMemOpCallTo - Store the argument to the stack or remember it in case of tail calls...
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
Carry-setting nodes for multiple precision addition and subtraction.
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
void setLRStoreRequired()
void push_back(const T &Elt)
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
unsigned getReg() const
getReg - Returns the register number.
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
The following two target-specific nodes are used for calls through function pointers in the 64-bit SV...
VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded during instruction selection to optimi...
APInt zext(unsigned width) const
Zero extend to a new width.
static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl)
bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, unsigned &InsertAtByte, bool &Swap, bool IsLE)
isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled by the XXINSERTW instruction intr...
MachineMemOperand::Flags flags
MO_LO, MO_HA - lo16(symbol) and ha16(symbol)
const SDValue & getValue() const
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain...
This class represents a function call, abstracting a target machine's calling convention.
static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG)
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Get a value with low bits set.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
QBRC, CHAIN = QVLFSb CHAIN, Ptr The 4xf32 load used for v4i1 constants.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change...
SDValue expandVSXStoreForLE(SDNode *N, DAGCombinerInfo &DCI) const
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
static MVT getFloatingPointVT(unsigned BitWidth)
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
MachineBasicBlock * emitEHSjLjSetJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
bool isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, bool &Swap, bool IsLE)
isXXSLDWIShuffleMask - Return true if this is a shuffle mask suitable for a XXSLDWI instruction...
const SDValue & getChain() const
Function Alias Analysis Results
uint64_t getSize() const
Return the size in bytes of the memory reference.
static SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, bool Is64Bit, SDValue GA)
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc, or post-dec.
TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or anything else with this node...
unsigned getValNo() const
bool hasAtomicLoad() const
Return true if this atomic instruction loads from memory.
CHAIN = RFEBB CHAIN, State - Return from event-based branch.
unsigned getAlignment() const
unsigned getValueSizeInBits(unsigned ResNo) const
Returns MVT::getSizeInBits(getValueType(ResNo)).
VEXTS, ByteWidth - takes an input in VSFRC and produces an output in VSFRC that is sign-extended from...
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags, unsigned PtrByteSize)
CalculateStackSlotAlignment - Calculates the alignment of this argument on the stack.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
Return the register class that should be used for the specified value type.
constexpr bool isInt< 16 >(int64_t x)
STATISTIC(NumFunctions, "Total number of functions")
unsigned const TargetRegisterInfo * TRI
bool isInteger() const
Return true if this is an integer or a vector integer type.
bool shouldExpandBuildVectorWithShuffles(EVT VT, unsigned DefinedValues) const override
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CALL - A direct function call.
CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a BCTRL instruction.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned char TargetFlags=0)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
bool isVectorTy() const
True if this is an instance of VectorType.
static BranchProbability getOne()
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
This defines the Use class.
Value * CallOperandVal
If this is the result output operand or a clobber, this is null, otherwise it is the incoming operand...
bool isOperationLegalOrCustom(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
SDValue getConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offs=0, bool isT=false, unsigned char TargetFlags=0)
void setVarArgsNumFPR(unsigned Num)
bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a VPKUDUM instruction.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPreIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mod...
Floating-point-to-interger conversion instructions.
static void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp, SDValue FPOp, SmallVectorImpl< TailCallArgumentInfo > &TailCallArguments)
unsigned getValueSizeInBits() const
Returns the size of the value in bits.
Newer FCFID[US] integer-to-floating-point conversion instructions for unsigned integers and single-pr...
unsigned getVarArgsNumGPR() const
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it...
int getReturnAddrSaveIndex() const
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
bool isLegalICmpImmediate(int64_t Imm) const override
isLegalICmpImmediate - Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructions which can compare a register against the immediate without having to materialize the immediate into a register.
bool isUnsignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs an unsigned comparison when used with intege...
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
return AArch64::GPR64RegClass contains(Reg)
SDValue getExternalSymbol(const char *Sym, EVT VT)
Instruction * emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a VPKUHUM instruction.
static MachineOperand CreateReg(unsigned Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering, unsigned NumBytes)
EnsureStackAlignment - Round stack frame size up from NumBytes to ensure minimum alignment required f...
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
GlobalBaseReg - On Darwin, this node represents the result of the mflr at function entry...
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace, unsigned Align=1, bool *Fast=nullptr) const override
Is unaligned memory access allowed for the given type, and is it fast relative to software emulation...
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic...
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Value * getArgOperand(unsigned i) const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
The address of a basic block.
void setVarArgsStackOffset(int Offset)
bool hasOneUse() const
Return true if there is exactly one use of this node.
bool isBeforeLegalize() const
A description of a memory reference used in the backend.
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset...
const_iterator begin() const
PPCFunctionInfo - This class is derived from MachineFunction private PowerPC target-specific informat...
const HexagonInstrInfo * TII
static Type * getFloatTy(LLVMContext &C)
bool isAfterLegalizeDAG() const
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
G8RC = ADDI_TLSGD_L_ADDR G8RReg, Symbol, Symbol - Op that combines ADDI_TLSGD_L and GET_TLS_ADDR unti...
Shift and rotation operations.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Class to represent struct types.
static cl::opt< bool > DisablePPCPreinc("disable-ppc-preinc", cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden)
Base class for LoadSDNode and StoreSDNode.
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef...
bool hasDirectMove() const
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth...
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign, unsigned MaxMaxAlign)
getMaxByValAlign - Helper for getByValTypeAlignment to determine the desired ByVal argument alignment...
static cl::opt< bool > EnableQuadPrecision("enable-ppc-quad-precision", cl::desc("enable quad precision float support on ppc"), cl::Hidden)
CallLoweringInfo & setChain(SDValue InChain)
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
static bool isLoad(int Opcode)
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
Base class for the full range of assembler expressions which are needed for parsing.
bool isIntegerTy() const
True if this is an instance of IntegerType.
op_iterator op_end() const
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
unsigned getScalarValueSizeInBits() const
uint64_t getConstantOperandVal(unsigned i) const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
FLT_ROUNDS_ - Returns the current rounding mode: -1 Undefined, 0 Round to 0, 1 Round to nearest, 2 Round to ...
virtual bool useLoadStackGuardNode() const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
void setCondCodeAction(ISD::CondCode CC, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
unsigned getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isInConsecutiveRegs() const
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA)...
The memory access is dereferenceable (i.e., doesn't trap).
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
Direct move from a GPR to a VSX register (algebraic)
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
x3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS model, produces an ADDI8 instruction t...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
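A hedged sketch of typical constructor usage; the opcode/type/action triples below are illustrative, not quoted from this file.
  // Hypothetical excerpt from a TargetLowering subclass constructor.
  setOperationAction(ISD::SDIV,      MVT::i32, Expand); // legalizer emits a sequence or libcall
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); // routed to this target's LowerOperation
  setOperationAction(ISD::CTPOP,     MVT::i32, Legal);  // a hardware instruction exists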
const DataLayout & getDataLayout() const
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
ATOMIC_CMP_SWAP - the exact same as the target-independent nodes except they ensure that the compare ...
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
const BlockAddress * getBlockAddress() const
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequen...
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE R Default(T Value)
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
LocInfo getLocInfo() const
QVALIGNI = This corresponds to the QPX qvaligni instruction.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
zlib-gnu style compression
This file implements a class to represent arbitrary precision integral constant values and operations...
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
bool useSoftFloat() const
SmallVector< ISD::InputArg, 32 > Ins
AtomicOrdering
Atomic ordering for LLVM's memory model.
STACKSAVE - STACKSAVE has one operand, an input chain.
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT)
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
unsigned getSizeInBits() const
unsigned getPrefLoopAlignment(MachineLoop *ML) const override
Return the preferred loop alignment.
Context object for machine code objects.
static const unsigned PerfectShuffleTable[6561+1]
ValTy * getCalledValue() const
Return the pointer to function that is being called.
int64_t getSExtValue() const
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
Fast - This calling convention attempts to make calls as fast as possible (e.g.
bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize)
isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand specifies a splat of a singl...
unsigned getScalarSizeInBits() const
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
unsigned NoNaNsFPMath
NoNaNsFPMath - This flag is enabled when the -enable-no-nans-fp-math flag is specified on the command...
An SDNode for Power9 vector absolute value difference.
Type * getType() const
All values are typed, get the type of this value.
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned LHSStart, unsigned RHSStart)
isVMerge - Common function, used to match vmrg* shuffles.
bool getBoolValue() const
Convert APInt to a boolean value.
MachineFunction & getMachineFunction() const
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
SDValue getRegisterMask(const uint32_t *RegMask)
unsigned MaxLoadsPerMemcmp
EK_LabelDifference32 - Each entry is the address of the block minus the address of the jump table...
SDValue getTargetFrameIndex(int FI, EVT VT)
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
LowerOperation - Provide custom lowering hooks for some operations.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - Return the ISD::SETCC ValueType
CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based loops.
bool has64BitSupport() const
has64BitSupport - Return true if the selected CPU supports 64-bit instructions, regardless of whether...
const TargetMachine & getTarget() const
BasicBlock * GetInsertBlock() const
bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG) const
SelectAddressRegReg - Given the specified address, check to see if it can be represented as an inde...
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
G8RC = ADDIS_TLSGD_HA x2, Symbol - For the general-dynamic TLS model, produces an ADDIS8 instruction ...
Class to represent array types.
This contains information for each constraint that we are lowering.
Simple integer binary arithmetic operators.
MO_NLP_HIDDEN_FLAG - If this bit is set, the symbol reference is to a symbol with hidden visibility...
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself...
SmallVector< ISD::OutputArg, 32 > Outs
CHAIN = STXVD2X CHAIN, VSRC, Ptr - Occurs only for little endian.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
bool isLittleEndian() const
Layout endianness...
static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG)
Reduce the number of loads when building a vector.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
Reloc::Model getRelocationModel() const
Returns the code generation relocation model.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
static const MCPhysReg FPR[]
FPR - The set of FP registers that should be allocated for arguments, on Darwin.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc, MachineFunction &MF) const override
getOptimalMemOpType - Returns the target specific optimal type for load and store operations as a res...
op_iterator op_begin() const
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
ArrayRef< SDUse > ops() const
static bool isFloatingPointZero(SDValue Op)
isFloatingPointZero - Return true if this is 0.0 or -0.0.
bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for a VRGL* instruction with the ...
static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base, int64_t &Offset, SelectionDAG &DAG)
bool isSignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs a signed comparison when used with integer o...
bool CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
const_iterator end() const
bool isFMAFasterThanFMulAndFAdd(EVT VT) const override
isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster than a pair of fmul and fadd i...
bool hasInvariantFunctionDescriptors() const
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type *> Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
ReplaceNodeResults - Replace the results of node with an illegal result type with new values built ou...
Value * getOperand(unsigned i) const
Analysis containing CSE Info
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to compute an offset from native ...
unsigned getByValSize() const
auto count(R &&Range, const E &Element) -> typename std::iterator_traits< decltype(adl_begin(Range))>::difference_type
Wrapper function around std::count to count the number of times an element Element occurs in the give...
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
This class is used to represent ISD::STORE nodes.
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
bool isXXBRWShuffleMask(ShuffleVectorSDNode *N)
isXXBRWShuffleMask - Return true if this is a shuffle mask suitable for a XXBRW instruction.
uint32_t FloatToBits(float Float)
This function takes a float and returns the bit equivalent 32-bit integer.
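A self-contained check of the returned bit pattern (assuming only that 1.0f is IEEE-754 single precision):
  #include "llvm/Support/MathExtras.h"
  #include <cassert>
  void checkFloatToBits() {
    // 1.0f is sign 0, exponent 127, zero mantissa: 0x3F800000.
    assert(llvm::FloatToBits(1.0f) == 0x3F800000U);
  }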
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Flag
These should be considered private to the implementation of the MCInstrDesc class.
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the specified, possibly variable...
TargetInstrInfo - Interface to description of machine instruction set.
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
iterator find(const_arg_type_t< KeyT > Val)
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Get a value with high bits set.
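For example, a small sketch:
  #include "llvm/ADT/APInt.h"
  // A 32-bit value whose top 16 bits are set: 0xFFFF0000.
  llvm::APInt HiMask = llvm::APInt::getHighBitsSet(32, 16);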
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
void setTailCallSPDelta(int size)
static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits...
VSRC, CHAIN = LXVD2X_LE CHAIN, Ptr - Occurs only for little endian.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
bool getFunctionSections() const
Return true if functions should be emitted into their own section, corresponding to -ffunction-sectio...
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
constexpr uint64_t MinAlign(uint64_t A, uint64_t B)
A and B are either alignments or offsets.
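MinAlign returns the largest power of two dividing both arguments, and since it is constexpr the contract can be checked at compile time:
  #include "llvm/Support/MathExtras.h"
  static_assert(llvm::MinAlign(16, 24) == 8, "largest common power-of-two factor");
  static_assert(llvm::MinAlign(8, 4) == 4, "an offset of 4 caps the alignment at 4");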
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space...
static bool usePartialVectorLoads(SDNode *N)
Returns true if we should use a direct load into vector instruction (such as lxsd or lfd)...
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
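A hedged sketch of the builder style, assuming MBB, an insertion iterator MI, a DebugLoc DL, and the PPC TargetInstrInfo TII are already in scope:
  // r3 = ADDI r3, 1 -- illustrative only, not code from this file.
  BuildMI(MBB, MI, DL, TII->get(PPC::ADDI), PPC::R3)
      .addReg(PPC::R3)
      .addImm(1);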
static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG, const PPCSubtarget &Subtarget)
unsigned getObjectAlignment(int ObjectIdx) const
Return the alignment of the specified stack object.
const SDValue & getBasePtr() const
A switch()-like statement whose cases are string literals.
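Typical usage (a self-contained sketch; parseColor is a made-up helper):
  #include "llvm/ADT/StringRef.h"
  #include "llvm/ADT/StringSwitch.h"
  static int parseColor(llvm::StringRef S) {
    return llvm::StringSwitch<int>(S)
        .Case("red", 0)
        .Case("green", 1)
        .Case("blue", 2)
        .Default(-1); // no case matched
  }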
virtual const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const
Return a mask of call-preserved registers for the given calling convention on the current function...
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
int64_t getOffset() const
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
void addLiveIn(MCPhysReg PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
unsigned UnsafeFPMath
UnsafeFPMath - This flag is enabled when the -enable-unsafe-fp-math flag is specified on the command ...
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int)
Check that the mask is shuffling N byte elements.
Control flow instructions. These all have token chains.
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N, SelectionDAG &DAG)
const MachineRegisterInfo * MRI
std::size_t countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1...
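A self-contained example, including the ZB_Width default behavior for a zero input:
  #include "llvm/Support/MathExtras.h"
  #include <cassert>
  void checkCountTrailingZeros() {
    assert(llvm::countTrailingZeros(0x10u) == 4);
    assert(llvm::countTrailingZeros(0u) == 32); // ZB_Width: returns the bit width for 0
  }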
MachineBasicBlock * EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *MBB, unsigned AtomicSize, unsigned BinOpcode, unsigned CmpOpcode=0, unsigned CmpPred=0) const
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
FSEL - Traditional three-operand fsel node.
bool shouldAssumeDSOLocal(const Module &M, const GlobalValue *GV) const
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
bool isXXBRQShuffleMask(ShuffleVectorSDNode *N)
isXXBRQShuffleMask - Return true if this is a shuffle mask suitable for a XXBRQ instruction.
LLVM Basic Block Representation.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
The instances of the Type class are immutable: once they are created, they are never changed...
This is an important class for using LLVM in a threaded context.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
Simple binary floating point operators.
ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls
void setTargetDAGCombine(ISD::NodeType NT)
Targets should invoke this method for each target independent node that they want to provide a custom...
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
unsigned getScalarSizeInBits() const
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This is an important base class in LLVM.
void resetAll()
Resets the known state of all bits.
MO_NLP_FLAG - If this bit is set, the symbol reference is actually to the non_lazy_ptr for the global...
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE...
G8RC = ADDIS_DTPREL_HA x3, Symbol - For the local-dynamic TLS model, produces an ADDIS8 instruction t...
SExtVElems, takes an input vector of a smaller type and sign extends to an output vector of a larger ...
VECINSERT - The PPC vector insert instruction.
Direct move from a VSX register to a GPR.
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator begin()
static bool is64Bit(const char *name)
const SDValue & getOperand(unsigned Num) const
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL...
CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a MTCTR instruction.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Carry-using nodes for multiple precision addition and subtraction.
static int CalculateTailCallSPDiff(SelectionDAG &DAG, bool isTailCall, unsigned ParamSize)
CalculateTailCallSPDiff - Get the amount the stack pointer has to be adjusted to accommodate the argu...
static ManagedStatic< OptionRegistry > OR
ConstantFP - Floating Point Values [float, double].
unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize, SelectionDAG &DAG)
getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the specified isSplatShuffleMask...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
virtual unsigned getPrefLoopAlignment(MachineLoop *ML=nullptr) const
Return the preferred loop alignment.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base, unsigned Bytes, int Dist, SelectionDAG &DAG)
STFIWX - The STFIWX instruction.
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint, return the type of constraint it is for this target...
FCFID - The FCFID instruction, taking an f64 operand and producing an f64 value containing the FP re...
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
bool isAcquireOrStronger(AtomicOrdering ao)
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const SDLoc &dl)
GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit the specified operations t...
bool useLoadStackGuardNode() const override
Override to support customized stack guard loading.
const SDValue & getOffset() const
void AddToWorklist(SDNode *N)
static mvt_range fp_valuetypes()
const PPCFrameLowering * getFrameLowering() const override
static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V, bool HasDirectMove, bool HasP8Vector)
Do we have an efficient pattern in a .td file for this node?
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
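Being constexpr, its behavior can be pinned down with static_assert:
  #include "llvm/Support/MathExtras.h"
  static_assert(llvm::isPowerOf2_64(1ULL << 40), "a single set bit");
  static_assert(!llvm::isPowerOf2_64(0), "zero is excluded");
  static_assert(!llvm::isPowerOf2_64(6), "two set bits");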
This file declares a class to represent arbitrary precision floating point values and provide a varie...
static Type * getVoidTy(LLVMContext &C)
Store scalar integers from VSR.
This class provides iterator support for SDUse operands that use a specific SDNode.
static void CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, SDValue Arg, int SPDiff, unsigned ArgOffset, SmallVectorImpl< TailCallArgumentInfo > &TailCallArguments)
CalculateTailCallArgDest - Remember Argument for later processing.
bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG, unsigned Alignment) const
SelectAddressRegImm - Returns true if the address N can be represented by a base register plus a sign...
bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a VPKUWUM instruction.
INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly...
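For example (hasEvenElement is a made-up helper):
  #include "llvm/ADT/STLExtras.h"
  #include "llvm/ADT/SmallVector.h"
  static bool hasEvenElement(const llvm::SmallVectorImpl<int> &V) {
    return llvm::any_of(V, [](int X) { return X % 2 == 0; });
  }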
bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side...
static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain, SDValue OldRetAddr, SDValue OldFP, int SPDiff, const SDLoc &dl)
EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to the appropriate stack sl...
void insertCopiesSplitCSR(MachineBasicBlock *Entry, const SmallVectorImpl< MachineBasicBlock *> &Exits) const override
Insert explicit copies in entry and exit blocks.
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
void setVarArgsFrameIndex(int Index)
TRAP - Trapping instruction.
std::string getEVTString() const
This function returns value type as a string, e.g. "i32".
static const MCPhysReg QFPR[]
QFPR - The set of QPX registers that should be allocated for arguments.
unsigned GuaranteedTailCallOpt
GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the commandline...
CHAIN = CLRBHRB CHAIN - Clear branch history rolling buffer.
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
void setPrefFunctionAlignment(unsigned Align)
Set the target's preferred function alignment.
GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point load which sign-extends from a 32-bit inte...
static mvt_range vector_valuetypes()
static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc, bool &isDot, const PPCSubtarget &Subtarget)
getVectorCompareInfo - Given an intrinsic, return false if it is not a vector comparison.
bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG) const
SelectAddressRegRegOnly - Given the specified address, force it to be represented as an indexed [r+...
G8RC = ADDIS_TLSLD_HA x2, Symbol - For the local-dynamic TLS model, produces an ADDIS8 instruction th...
Optional< StringRef > getSectionPrefix() const
Get the section prefix for this function.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
self_iterator getIterator()
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align=0, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, unsigned Size=0)
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
std::pair< NoneType, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec TLS model, produces a LD instruction ...
QVESPLATI = This corresponds to the QPX qvesplati instruction.
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
unsigned NoInfsFPMath
NoInfsFPMath - This flag is enabled when the -enable-no-infs-fp-math flag is specified on the command...
Common code between 32-bit and 64-bit PowerPC targets.
unsigned MaxStoresPerMemmove
Specify maximum bytes of store instructions per memmove call.
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo...
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift amount, otherwise return -1...
std::vector< ArgListEntry > ArgListTy
unsigned getAlignment() const
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
void initializeSplitCSR(MachineBasicBlock *Entry) const override
Perform necessary initialization to handle a subset of CSRs explicitly via copies.
void setMinReservedArea(unsigned size)
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
bool isLittleEndian() const
This structure contains all information that is necessary for lowering calls.
bool isPositionIndependent() const
unsigned getDarwinDirective() const
getDarwinDirective - Returns the -m directive specified for the cpu.
const TargetMachine & getTargetMachine() const
StringRef getSection() const
This class contains a discriminated union of information about pointers in memory operands...
unsigned getNumOperands() const
Return the number of values used by this operation.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
void setUseUnderscoreLongJmp(bool Val)
Indicate whether this target prefers to use _longjmp to implement llvm.longjmp or the version without...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
MachineBasicBlock * emitEHSjLjLongJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG, SDValue Input, uint64_t Elems, uint64_t CorrectElems)
uint64_t getAlignment() const
Return the minimum known alignment in bytes of the actual memory reference.
bool isXXBRDShuffleMask(ShuffleVectorSDNode *N)
isXXBRDShuffleMask - Return true if this is a shuffle mask suitable for a XXBRD instruction.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands...
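A sketch of a plain (non-extending) stack load, assuming DAG, dl, a frame index FI, and a MachineFunction MF are in scope:
  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
  SDValue Ld = DAG.getLoad(MVT::i32, dl, DAG.getEntryNode(), FIN,
                           MachinePointerInfo::getFixedStack(MF, FI));
  // Ld is the loaded i32 (value 0); Ld.getValue(1) is the output chain.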
CHAIN,FLAG = BCTRL(CHAIN, ADDR, INFLAG) - The combination of a bctrl instruction and the TOC reload r...
const uint32_t * getNoPreservedMask() const override
SDValue CreateStackTemporary(EVT VT, unsigned minAlign=1)
Create a stack temporary, suitable for holding the specified value type.
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
bool isEXTLoad(const SDNode *N)
Returns true if the specified node is a EXTLOAD.
unsigned getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
LowerAsmOperandForConstraint - Lower the specified operand into the Ops vector.
GPRC, CHAIN = LXSIZX, CHAIN, Ptr, ByteWidth - This is a load of an integer smaller than 64 bits into ...
const MCPhysReg * getCalleeSavedRegsViaCopy(const MachineFunction *MF) const
The memory access writes data.
bool isReleaseOrStronger(AtomicOrdering ao)
Extract a subvector from unsigned integer vector and convert to FP.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type...
SDValue getTargetConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offset=0, unsigned char TargetFlags=0)
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned char TargetFlags=0)
TokenFactor - This node takes multiple tokens as input and produces a single token result...
QBFLT = Access the underlying QPX floating-point boolean representation.
bool isBuildVectorAllOnes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are ~0 or undef...
EXTSWSLI = The PPC extswsli instruction, which does an extend-sign word and shift left immediate...
const PPCRegisterInfo * getRegisterInfo() const override
void addLiveInAttr(unsigned VReg, ISD::ArgFlagsTy Flags)
This function associates attributes for each live-in virtual register.
const TargetLowering & getTargetLoweringInfo() const
Iterator for intrusive lists based on ilist_node.
static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags, unsigned PtrByteSize, unsigned LinkageSize, unsigned ParamAreaSize, unsigned &ArgOffset, unsigned &AvailableFPRs, unsigned &AvailableVRs, bool HasQPX)
CalculateStackSlotUsed - Return whether this argument will use its stack slot (instead of being passe...
CCState - This class holds information needed while lowering arguments and return values...
x3 = GET_TLSLD_ADDR x3, Symbol - For the local-dynamic TLS model, produces a call to __tls_get_addr(s...
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
This is the shared class of boolean and integer constants.
GPRC = TOC_ENTRY GA, TOC Loads the entry for GA from the TOC, where the TOC base is given by the last...
bool isFPImmLegal(const APFloat &Imm, EVT VT) const override
Returns true if the target can instruction select the specified FP immediate natively.
void setNode(SDNode *N)
set the SDNode
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const
static bool needStackSlotPassParameters(const PPCSubtarget &Subtarget, const SmallVectorImpl< ISD::OutputArg > &Outs)
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
bool isJumpTableRelative() const override
EVT getVectorElementType() const
Given a vector type, return the type of each element.
void setIsSplitCSR(bool s)
XXSPLT - The PPC VSX splat instructions.
bool CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base, unsigned Bytes, int Dist, SelectionDAG &DAG)
VECSHL - The PPC vector shift left instruction.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These methods get and set the calling convention of this function...
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
X = FP_ROUND_INREG(Y, VT) - This operator takes an FP register, and rounds it to a floating point val...
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
virtual SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const
Returns relocation base for the given PIC jumptable.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
G8RC = ADD_TLS G8RReg, Symbol - Used by the initial-exec TLS model, produces an ADD instruction that ...
Module.h This file contains the declarations for the Module class.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
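A sketch, assuming DAG and dl are in scope: building a v4i32 splat of 1.
  SDValue One = DAG.getConstant(1, dl, MVT::i32);
  SDValue Ops[] = {One, One, One, One};
  SDValue Splat = DAG.getBuildVector(MVT::v4i32, dl, Ops);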
Provides information about what library functions are available for the current target.
bool isVector(MCInstrInfo const &MCII, MCInst const &MCI)
CCValAssign - Represent assignment of one arg/retval to a location.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
constexpr size_t array_lengthof(T(&)[N])
Find the length of an array.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
unsigned getABITypeAlignment(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
BRCOND - Conditional branch.
static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, unsigned PtrByteSize)
CalculateStackSlotSize - Calculates the size reserved for this argument on the stack.
Byte Swap and Counting operators.
static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, SelectionDAG &DAG, const SDLoc &dl)
BuildSplatI - Build a canonical splati of Val with an element size of SplatSize.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG, const SDLoc &dl, EVT DestVT=MVT::Other)
BuildIntrinsicOp - Return a unary operator intrinsic node with the specified intrinsic ID...
CHAIN = SC CHAIN, Imm128 - System call.
This is an abstract virtual class for memory operations.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
const char * getTargetNodeName(unsigned Opcode) const override
getTargetNodeName() - This method returns the name of a target specific DAG node. ...
const Constant * getConstVal() const
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
bool enableAggressiveFMAFusion(EVT VT) const override
Return true if target always beneficiates from combining into FMA for a given value type...
void setHasMultipleConditionRegisters(bool hasManyRegs=true)
Tells the code generator that the target has multiple (allocatable) condition registers that can be u...
x3 = GET_TLS_ADDR x3, Symbol - For the general-dynamic TLS model, produces a call to __tls_get_addr(s...
void insertSSPDeclarations(Module &M) const override
Inserts necessary declarations for SSP (stack protection) purpose.
static SDNode * isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG)
isBLACompatibleAddress - Return the immediate to use if the specified 32-bit value is representable in the immediate field of a BLA instruction.
Represents one node in the SelectionDAG.
static Constant * get(Type *Ty, double V)
This returns a ConstantFP, or a vector containing a splat of a ConstantFP, for the specified value in...
VPERM - The PPC VPERM Instruction.
bool isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, bool &Swap, bool IsLE)
isXXPERMDIShuffleMask - Return true if this is a shuffle mask suitable for a XXPERMDI instruction...
bool hasP8Altivec() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
const Function & getFunction() const
Return the LLVM function that this machine code represents.
STXSIX - The STXSI[bh]X instruction.
unsigned getMinReservedArea() const
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static mvt_range integer_valuetypes()
The access may modify the value stored in memory.
i1 = ANDIo_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the eq or gt bit of CR0 after execu...
MachinePointerInfo getWithOffset(int64_t O) const
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
bool isDereferenceable() const
G8RC = ADDIS_GOT_TPREL_HA x2, Symbol - Used by the initial-exec TLS model, produces an ADDIS8 instruc...
Class to represent vector types.
bool hasP9Altivec() const
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT...
void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
EVT getMemoryVT() const
Return the type of the in-memory value.
Target - Wrapper for Target specific information.
Class for arbitrary precision integers.
unsigned getByValAlign() const
CodeModel::Model getCodeModel() const
Returns the code model.
bool hasRecipPrec() const
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
QVGPCI = This corresponds to the QPX qvgpci instruction.
iterator_range< use_iterator > uses()
bool isLegalAddImmediate(int64_t Imm) const override
isLegalAddImmediate - Return true if the specified immediate is a legal add immediate, that is, the target has add instructions which can add a register and the immediate without having to materialize the immediate into a register.
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
A "pseudo-class" with methods for operating on BUILD_VECTORs.
Select(COND, TRUEVAL, FALSEVAL).
void setMinFunctionAlignment(unsigned Align)
Set the target's minimum function alignment (in log2(bytes))
int getVarArgsFrameIndex() const
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static use_iterator use_end()
void setPrefLoopAlignment(unsigned Align)
Set the target's preferred loop alignment.
ZERO_EXTEND - Used for integer types, zeroing the new bits.
ANY_EXTEND - Used for integer types. The high bits are undefined.
The combination of sra[wd]i and addze used to implement signed integer division by a power of 2...
LLVM_ATTRIBUTE_ALWAYS_INLINE StringSwitch & Case(StringLiteral S, T Value)
static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement, bool Swap, SDLoc &DL, SelectionDAG &DAG)
This function is called when we have proved that a SETCC node can be replaced by subtraction (and oth...
bool use64BitRegs() const
use64BitRegs - Return true if in 64-bit mode or if we should use 64-bit registers in 32-bit mode when...
iterator insert(iterator I, T &&Elt)
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
int getMaskElt(unsigned Idx) const
bool enableMachineScheduler() const override
GPRC = address of GLOBAL_OFFSET_TABLE.
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const override
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase, but as an MCExpr.
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca...
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
static const fltSemantics & PPCDoubleDouble() LLVM_READNONE
BR_JT - Jumptable branch.
Representation of each machine instruction.
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
SDValue getStackArgumentTokenFactor(SDValue Chain)
Compute a TokenFactor to force all the incoming stack arguments to be loaded from the stack...
These are IR-level optimization flags that may be propagated to SDNodes.
Represents a use of a SDNode.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned char TargetFlags=0)
SmallVector< SDValue, 32 > OutVals
ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
GPRC, CHAIN = MFBHRBE CHAIN, Entry, Dummy - Move from branch history rolling buffer entry...
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
bool isVector() const
Return true if this is a vector value type.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
Bitwise operators - logical and, logical or, logical xor.
bool isOnlyUserOf(const SDNode *N) const
Return true if this node is the only use of N.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
bool isStrongDefinitionForLinker() const
Returns true if this global's definition will be the one chosen by the linker.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
unsigned getLocMemOffset() const
Reciprocal estimate instructions (unary FP ops).
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
LLVM_NODISCARD bool empty() const
static bool isFPExtLoad(SDValue Op)
unsigned getMatchingSuperReg(unsigned Reg, unsigned SubIdx, const TargetRegisterClass *RC) const
Return a super-register of the specified register Reg so its sub-register of index SubIdx is Reg...
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
static bool isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width)
bool isInConsecutiveRegsLast() const
CallingConv::ID getCallingConv() const
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for a VMRGEW or VMRGOW instructi...
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
bool hasLazyResolverStub(const GlobalValue *GV) const
hasLazyResolverStub - Return true if accesses to the specified global have to go through a dyld lazy ...
bool isFPExtFree(EVT DestVT, EVT SrcVT) const override
Return true if an fpext operation is free (for instance, because single-precision floating-point numb...
F8RC = MFFS - This moves the FPSCR (not modeled) into the register.
PointerUnion< const Value *, const PseudoSourceValue * > ptrVal
Establish a view to a call site for examination.
int64_t getOffset() const
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation.
const Function * getParent() const
Return the enclosing method, or null if none.
static MachineOperand CreateImm(int64_t Val)
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
Direct move from a GPR to a VSX register (zero)
static bool callsShareTOCBase(const Function *Caller, SDValue Callee, const TargetMachine &TM)
const TargetSubtargetInfo & getSubtarget() const
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
Flags getFlags() const
Return the raw flags of the source value,.
The CMPB instruction (takes two operands of i32 or i64).
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
The memory access always returns the same value (or traps).
unsigned MaxStoresPerMemmoveOptSize
Maximum number of store instructions that may be substituted for a call to memmove, used for functions with OptSize attribute.
unsigned MaxStoresPerMemcpyOptSize
Maximum number of store operations that may be substituted for a call to memcpy, used for functions w...
void setStackPointerRegisterToSaveRestore(unsigned R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a byte-swapping store instruction. ...
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
GetInstSize - Return the number of bytes of code the specified instruction may be.
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain targets require unusual breakdowns of certain types.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
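The usual pattern, sketched under the assumption that an SDNode *N is in scope; dyn_cast returns null on a type mismatch rather than asserting:
  if (auto *C = llvm::dyn_cast<llvm::ConstantSDNode>(N->getOperand(1))) {
    uint64_t Imm = C->getZExtValue(); // operand 1 is a constant; read its value
    (void)Imm;
  }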
block_iterator block_end() const
constexpr int32_t SignExtend32(uint32_t X)
Sign-extend the number in the bottom B bits of X to a 32-bit integer.
static void setUsesTOCBasePtr(MachineFunction &MF)
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value *> Args=None, const Twine &Name="", MDNode *FPMathTag=nullptr)
TC_RETURN - A tail call return.
constexpr char IsConst[]
Key for Kernel::Arg::Metadata::mIsConst.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool useCRBits() const
useCRBits - Return true if we should store and manipulate i1 values in the individual condition regis...
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
unsigned getOpcode() const
FSINCOS - Compute both fsin and fcos as a single operation.
SDValue getValue(unsigned R) const
unsigned MaxStoresPerMemcpy
Specify maximum bytes of store instructions per memcpy call.
constexpr bool isUInt< 16 >(uint64_t x)
bool isDarwin() const
isDarwin - True if this is any darwin platform.
unsigned getNumRegisters(LLVMContext &Context, EVT VT) const
Return the number of registers that this ValueType will eventually require.
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.setjmp intrinsic.
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
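In the real headers B is a template parameter (elided by doxygen above); both functions are constexpr, so their behavior can be checked at compile time:
  #include "llvm/Support/MathExtras.h"
  static_assert(llvm::SignExtend32<16>(0xFFFF) == -1, "bit 15 is the sign bit");
  static_assert(llvm::SignExtend64<32>(0x80000000ULL) == -2147483648LL, "bit 31 is the sign bit");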
static bool isConstantOrUndef(int Op, int Val)
isConstantOrUndef - Op is either an undef node or a ConstantSDNode.
static bool isFunctionGlobalAddress(SDValue Callee)
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
const MachinePointerInfo & getPointerInfo() const
VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little endian.
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
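A sketch, assuming DAG and an i32-typed SDValue Op are in scope: Op is a provable multiple of 4 exactly when its low two bits are known zero.
  bool Aligned4 = DAG.MaskedValueIsZero(Op, llvm::APInt::getLowBitsSet(32, 2));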
unsigned getLinkageSize() const
getLinkageSize - Return the size of the PowerPC ABI linkage area.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
XXREVERSE - The PPC VSX reverse instruction.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
void insert(iterator MBBI, MachineBasicBlock *MBB)
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG)
void setReturnAddressIsTaken(bool s)
bool isPredecessorOf(const SDNode *N) const
Return true if this node is a predecessor of N.
void setMinStackArgumentAlignment(unsigned Align)
Set the minimum stack alignment of an argument (in log2(bytes)).
Direct move of 2 consecutive GPRs to a VSX register.
POPCNTDKind hasPOPCNTD() const
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
unsigned getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
ArrayRef< int > getMask() const
Module * getParent()
Get the module that this global value is contained inside of...
LLVM Value Representation.
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
FMA - Perform a * b + c with no intermediate rounding step.
static void StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG, SDValue Chain, const SmallVectorImpl< TailCallArgumentInfo > &TailCallArgs, SmallVectorImpl< SDValue > &MemOpChains, const SDLoc &dl)
StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
SDValue getRegister(unsigned Reg, EVT VT)
void setUseUnderscoreSetJmp(bool Val)
Indicate whether this target prefers to use _setjmp to implement llvm.setjmp or the version without _...
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
unsigned getSizeInBits(unsigned Reg, const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI) const
Get the size in bits of Reg.
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
isTruncateFree - Return true if it's free to truncate a value of type Ty1 to type Ty2...
CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This corresponds to the COND_BRANCH pseudo ...
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
Instruction * emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst, AtomicOrdering Ord) const override
virtual void insertSSPDeclarations(Module &M) const
Inserts necessary declarations for SSP (stack protection) purpose.
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond)
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
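A sketch, assuming DAG, dl, and two i32 SDValues LHS/RHS are in scope: a signed minimum built from SELECT_CC.
  // Min = (LHS < RHS) ? LHS : RHS
  SDValue Min = DAG.getSelectCC(dl, LHS, RHS, LHS, RHS, ISD::SETLT);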
SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const override
Returns relocation base for the given PIC jumptable.
MCSymbol * getPICBaseSymbol() const
getPICBaseSymbol - Return a function-local symbol to represent the PIC base.
SDValue getValueType(EVT)
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
PREFETCH - This corresponds to a prefetch intrinsic.
static cl::opt< bool > DisableILPPref("disable-ppc-ilp-pref", cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden)
bool isNonTemporal() const
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
These nodes represent PPC shifts.
static bool areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC, CallingConv::ID CalleeCC)
virtual bool shouldExpandBuildVectorWithShuffles(EVT, unsigned DefinedValues) const
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone...
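A sketch of rewriting a single result during a DAG combine (NewVal is hypothetical):

  // Redirect all users of N's first result; N's other results keep their users.
  DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), NewVal);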
Primary interface to the complete machine description for the target machine.
PICLevel::Level getPICLevel() const
Returns the PIC level (small or large model)
static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget, unsigned &HiOpFlags, unsigned &LoOpFlags, const GlobalValue *GV=nullptr)
Return true if we should reference labels using a PICBase, set the HiOpFlags and LoOpFlags to the tar...
static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
SetCC operator - This evaluates to a true value iff the condition is true.
static bool isSplat(ArrayRef< Value *> VL)
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
unsigned MaxStoresPerMemsetOptSize
Maximum number of store operations that may be substituted for the call to memset, used for functions with the OptSize attribute.
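These knobs are normally assigned in the target's TargetLowering constructor; the values below are purely illustrative, not the backend's actual tuning:

  // Hypothetical tuning: inline memset as up to 8 stores, or 4 when
  // optimizing for size.
  MaxStoresPerMemset = 8;
  MaxStoresPerMemsetOptSize = 4;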
static BranchProbability getZero()
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
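For instance, a combine can prove a value is even (a sketch with hypothetical names):

  KnownBits Known = DAG.computeKnownBits(Op);
  if (Known.Zero[0]) {
    // The low bit is known zero, so Op is provably even.
  }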
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
unsigned getNumOperands() const
Extract a subvector from a signed integer vector and convert it to FP.
const SDValue & getOperand(unsigned i) const
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to the "store atomic" instruction.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
unsigned getLocReg() const
uint64_t getZExtValue() const
RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP* instructions.
TRUNCATE - Completely drop the high bits.
FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64 operand, producing an f64 value...
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Hi/Lo - These represent the high and low 16-bit parts of a global address respectively.
unsigned AllocateReg(unsigned Reg)
AllocateReg - Attempt to allocate one register.
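In a calling-convention handler this typically looks like the sketch below (ValNo, ValVT, LocVT, and LocInfo come from the handler's parameters):

  // Try to assign the argument to GPR 3; AllocateReg returns 0 when taken.
  if (unsigned Reg = State.AllocateReg(PPC::R3))
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));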
unsigned getLiveInVirtReg(unsigned PReg) const
getLiveInVirtReg - If PReg is a live-in physical register, return the corresponding live-in virtual register.
const MachineOperand & getOperand(unsigned i) const
bool isXXBRHShuffleMask(ShuffleVectorSDNode *N)
isXXBRHShuffleMask - Return true if this is a shuffle mask suitable for a XXBRH instruction.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
On a symbol operand "FOO", this indicates that the reference is actually to "FOO@plt".
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
unsigned getVarArgsNumFPR() const
FastISel * createFastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const override
createFastISel - This method returns a target-specific FastISel object, or null if the target does no...
block_iterator block_begin() const
Perform various unary floating-point operations inspired by libm.
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding towards zero.
An SDNode for swaps that are not associated with any loads/stores and thereby have no chain...
unsigned AllocateStack(unsigned Size, unsigned Align)
AllocateStack - Allocate a chunk of stack space with the specified size and alignment.
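The stack-overflow path of the same kind of calling-convention handler, sketched:

  // Reserve 4 bytes with 4-byte alignment and record a memory location.
  unsigned Offset = State.AllocateStack(4, 4);
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));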
RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the altivec VCMP*o instructions.
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to the "load atomic" instruction.
GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point load which zero-extends from a 32-bit inte...
bool isExtended() const
Test if the given EVT is extended (as opposed to being simple).
const SDValue & getBasePtr() const
virtual const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase, but as an MCExpr.
LLVMContext * getContext() const
bool isIntS16Immediate(SDNode *N, int16_t &Imm)
isIntS16Immediate - This method tests to see if the node is either a 32-bit or 64-bit immediate...
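A sketch of the usual pattern when matching D-form addressing (N is hypothetical):

  int16_t Imm;
  if (isIntS16Immediate(N, Imm)) {
    // N is a constant that fits the signed 16-bit displacement field.
  }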
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
static Constant * get(ArrayRef< Constant *> V)
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
MO_PIC_FLAG - If this bit is set, the symbol reference is relative to the function's picbase...
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for a VMRGH* instruction with the ...
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL, bool LegalTypes=true) const
unsigned createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
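For example (a sketch; MRI is the function's MachineRegisterInfo):

  // Create a fresh virtual register in the 32-bit GPR register class.
  unsigned VReg = MRI.createVirtualRegister(&PPC::GPRCRegClass);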
static bool hasSameArgumentList(const Function *CallerFn, ImmutableCallSite CS)
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, const SDLoc &dl)
CreateCopyOfByValArgument - Make a copy of an aggregate at address specified by "Src" to address "Dst...
XXPERMDI - The PPC XXPERMDI instruction.
This file describes how to lower LLVM code to machine code.
const BasicBlock * getParent() const
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned char TargetFlags=0)
void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
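The PPC backend uses this to mark update-form (pre-increment) stores as legal; a representative call from a TargetLowering constructor:

  // Pre-increment (update-form) i32 stores map to stwu/stwux.
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);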
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
int isQVALIGNIShuffleMask(SDNode *N)
If this is a qvaligni shuffle mask, return the shift amount, otherwise return -1.
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that...
This class is used to represent ISD::LOAD nodes.
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary...
bool useSoftFloat() const override