#define DEBUG_TYPE "systemz-lower"
: Op0(Op0In), Op1(Op1In), Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}
LongDisplacement(LongDispl), IndexReg(IdxReg) {}
switch (II->getIntrinsicID()) {
if (isa<LoadInst>(I) && I->hasOneUse()) {
if (SingleUser->getParent() == I->getParent()) {
if (isa<ICmpInst>(SingleUser)) {
if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
if (C->getBitWidth() <= 64 &&
} else if (isa<StoreInst>(SingleUser))
} else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())
if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {
bool IsVectorAccess = MemAccessTy->isVectorTy();
if (!IsVectorAccess && isa<StoreInst>(I)) {
if (isa<ExtractElementInst>(DataOp))
IsVectorAccess = true;
if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
if (isa<InsertElementInst>(LoadUser))
IsVectorAccess = true;
if (IsFPAccess || IsVectorAccess)
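// Note on the checks above: the addressing-mode analysis looks at how a load
// or store is actually used - a load whose single user is a compare against a
// small constant, a load/store pair within one block, or an FP/vector access
// on a vector-capable subtarget - to decide which displacement and
// index-register forms of the memory instructions may be used for it.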
return AM.Scale == 0;
return FromBits > ToBits;
return FromBits > ToBits;
if (Constraint.size() == 1) {
switch (Constraint[0]) {
const char *constraint) const {
switch (*constraint) {
Subtarget.hasVector())
if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
if (isUInt<12>(C->getZExtValue()))
if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
if (isInt<20>(C->getSExtValue()))
if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
if (C->getZExtValue() == 0x7fffffff)
static std::pair<unsigned, const TargetRegisterClass *>
const unsigned *Map, unsigned Size) {
assert(*(Constraint.end()-1) == '}' && "Missing '}'");
if (isdigit(Constraint[2])) {
Constraint.slice(2, Constraint.size() - 1).getAsInteger(10, Index);
if (!Failed && Index < Size && Map[Index])
return std::make_pair(Map[Index], RC);
return std::make_pair(0U, nullptr);
std::pair<unsigned, const TargetRegisterClass *>
if (Constraint.size() == 1) {
switch (Constraint[0]) {
return std::make_pair(0U, &SystemZ::GR64BitRegClass);
return std::make_pair(0U, &SystemZ::GR128BitRegClass);
return std::make_pair(0U, &SystemZ::GR32BitRegClass);
return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);
return std::make_pair(0U, &SystemZ::GRH32BitRegClass);
return std::make_pair(0U, &SystemZ::FP64BitRegClass);
return std::make_pair(0U, &SystemZ::FP128BitRegClass);
return std::make_pair(0U, &SystemZ::FP32BitRegClass);
if (Subtarget.hasVector()) {
return std::make_pair(0U, &SystemZ::VR32BitRegClass);
return std::make_pair(0U, &SystemZ::VR64BitRegClass);
return std::make_pair(0U, &SystemZ::VR128BitRegClass);
if (Constraint.size() > 0 && Constraint[0] == '{') {
if (Constraint[1] == 'r') {
if (Constraint[1] == 'f') {
if (Constraint[1] == 'v') {
std::vector<SDValue> &Ops,
if (Constraint.length() == 1) {
switch (Constraint[0]) {
if (auto *C = dyn_cast<ConstantSDNode>(Op))
if (auto *C = dyn_cast<ConstantSDNode>(Op))
if (isUInt<12>(C->getZExtValue()))
if (auto *C = dyn_cast<ConstantSDNode>(Op))
if (auto *C = dyn_cast<ConstantSDNode>(Op))
if (isInt<20>(C->getSExtValue()))
if (auto *C = dyn_cast<ConstantSDNode>(Op))
if (C->getZExtValue() == 0x7fffffff)
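// The constant tests above implement the SystemZ inline-asm immediate
// constraints: 'I' accepts an unsigned 8-bit value, 'J' an unsigned 12-bit
// value, 'K' a signed 16-bit value, 'L' a signed 20-bit value and 'M' only
// the value 0x7fffffff - hence the isUInt<12>, isInt<20> and 0x7fffffff
// checks both here and in getSingleConstraintMatchWeight() above.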
#include "SystemZGenCallingConv.inc"
static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,
Type *ToType) const {
for (unsigned i = 0; i < Ins.size(); ++i)
for (unsigned i = 0; i < Outs.size(); ++i)
if (Subtarget.hasVector())
unsigned NumFixedGPRs = 0;
unsigned NumFixedFPRs = 0;
for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
RC = &SystemZ::GR32BitRegClass;
RC = &SystemZ::GR64BitRegClass;
RC = &SystemZ::FP32BitRegClass;
RC = &SystemZ::FP64BitRegClass;
RC = &SystemZ::VR128BitRegClass;
ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
unsigned ArgIndex = Ins[I].OrigArgIndex;
assert(Ins[I].PartOffset == 0);
while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
unsigned PartOffset = Ins[I + 1].PartOffset;
int64_t StackSize = CCInfo.getNextStackOffset();
int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
&SystemZ::FP64BitRegClass);
SystemZ::NumArgFPRs - NumFixedFPRs));
for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
if (Subtarget.hasVector()) {
unsigned NumBytes = ArgCCInfo.getNextStackOffset();
for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
DAG.getStore(Chain, DL, ArgValue, SpillSlot,
unsigned ArgIndex = Outs[I].OrigArgIndex;
assert(Outs[I].PartOffset == 0);
while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
SDValue PartValue = OutVals[I + 1];
unsigned PartOffset = Outs[I + 1].PartOffset;
DAG.getStore(Chain, DL, PartValue, Address,
ArgValue = SpillSlot;
if (!MemOpChains.empty())
if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
} else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
} else if (IsTailCall) {
Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
RegsToPass[I].second.getValueType()));
assert(Mask && "Missing call preserved mask for calling convention");
for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
if (Subtarget.hasVector())
for (auto &Out : Outs)
CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context);
return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);
if (Subtarget.hasVector())
if (RetLocs.empty())
for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
unsigned &CCValid) {
unsigned Id = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
for (unsigned I = 2; I < NumOps; ++I)
for (unsigned I = 1; I < NumOps; ++I)
case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X
int64_t Value = ConstOp1->getSExtValue();
C.Op1 = DAG.getConstant(0, DL, C.Op1.getValueType());
if (!C.Op0.hasOneUse() ||
auto *Load = cast<LoadSDNode>(C.Op0);
unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
if (NumBits != 8 && NumBits != 16)
auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
uint64_t Value = ConstOp1->getZExtValue();
uint64_t Mask = (1 << NumBits) - 1;
int64_t SignedValue = ConstOp1->getSExtValue();
if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask)
} else if (NumBits == 8) {
if (C.Op0.getValueType() != MVT::i32 ||
Load->getBasePtr(), Load->getPointerInfo(),
Load->getMemoryVT(), Load->getAlignment(),
Load->getMemOperand()->getFlags());
if (C.Op1.getValueType() != MVT::i32 ||
Value != ConstOp1->getZExtValue())
switch (Load->getExtensionType()) {
if (isa<ConstantFPSDNode>(C.Op1))
if (ConstOp1 && ConstOp1->getZExtValue() == 0)
unsigned Opcode0 = C.Op0.getOpcode();
cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff)
for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
if (C1 && C1->isZero()) {
for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
if (C.Op0.getOpcode() == ISD::SHL &&
C.Op0.getValueType() == MVT::i64 &&
cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
if (C1 && C1->getZExtValue() == 32) {
SDValue ShlOp0 = C.Op0.getOperand(0);
C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
if (L->getMemoryVT().getStoreSizeInBits() <= C.Op0.getValueSizeInBits()) {
unsigned Type = L->getExtensionType();
C.Op0 = C.Op0.getOperand(0);
C.Op1 = DAG.getConstant(0, DL, C.Op0.getValueType());
uint64_t Mask, uint64_t CmpVal,
unsigned ICmpType) {
assert(Mask != 0 && "ANDs with zero should have been removed by now");
uint64_t High = uint64_t(1) << HighShift;
if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {
if (EffectivelyUnsigned && CmpVal < Low) {
if (CmpVal == Mask) {
if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
if (Mask == Low + High) {
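// The range checks above classify a masked comparison for TEST UNDER MASK:
// given the AND mask and the comparison value, they decide whether the test
// is equivalent to "all selected bits zero", "some bits set", "all selected
// bits set", or a test of only the most-significant selected bit, and pick
// the corresponding CCMASK_TM_* condition mask.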
if (C.Op0.getOpcode() == ISD::AND) {
NewC.Op0 = C.Op0.getOperand(0);
NewC.Op1 = C.Op0.getOperand(1);
if (NewC.Op0.getValueType() != MVT::i64 ||
if (CmpVal == uint64_t(-1))
MaskVal = -(CmpVal & -CmpVal);
unsigned BitSize = NewC.Op0.getValueSizeInBits();
unsigned NewCCMask, ShiftVal;
NewC.Op0.getOpcode() == ISD::SHL &&
(MaskVal >> ShiftVal != 0) &&
((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
MaskVal >> ShiftVal,
NewC.Op0 = NewC.Op0.getOperand(0);
MaskVal >>= ShiftVal;
NewC.Op0.getOpcode() == ISD::SRL &&
(MaskVal << ShiftVal != 0) &&
((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
MaskVal << ShiftVal,
NewC.Op0 = NewC.Op0.getOperand(0);
MaskVal <<= ShiftVal;
C.Op1 = DAG.getConstant(MaskVal, DL, C.Op0.getValueType());
C.CCMask = NewCCMask;
if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue())
C.Op0 = C.Op0.getOperand(0);
SDValue Call, unsigned CCValid, uint64_t CC,
C.CCValid = CCValid;
C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
C.CCMask &= CCValid;
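// The CCMask expressions above turn a comparison against an intrinsic's
// returned condition code into a branch mask: CC values 0-3 correspond to
// bits 3 down to 0 of the 4-bit mask, so equality selects a single bit,
// inequality its complement, and the ordered comparisons a contiguous run of
// bits; CC values outside 0-3 collapse to "never" (0) or "always" (-1).
// The final '&= CCValid' clears bits the instruction can never produce.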
uint64_t Constant = cast<ConstantSDNode>(CmpOp1)->getZExtValue();
unsigned Opcode, CCValid;
Comparison C(CmpOp0, CmpOp1);
if (C.Op0.getValueType().isFloatingPoint()) {
if (!C.Op1.getNode()) {
switch (C.Op0.getOpcode()) {
unsigned CCValid, unsigned CCMask) {
int Mask[] = { Start, -1, Start + 1, -1 };
!Subtarget.hasVectorEnhancements1()) {
return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1);
bool Invert = false;
assert(IsFP && "Unexpected integer comparison");
assert(IsFP && "Unexpected integer comparison");
Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1);
Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0);
return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1);
Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
return emitSETCC(DAG, DL, CCReg, C.CCValid, C.CCMask);
Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 &&
Pos.getOperand(0) == CmpOp)));
Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
if (Subtarget.isPC32DBLSymbol(GV, CM)) {
uint64_t Anchor = Offset & ~uint64_t(0xfff);
if (Offset != 0 && (Offset & 1) == 0) {
Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R12D, GOT, Glue);
Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);
assert(Mask && "Missing call preserved mask for calling convention");
Chain = DAG.getNode(Opcode, DL, NodeTys, Ops);
SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,
SDValue TP = lowerThreadPointer(DL, DAG);
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
if (!BackChainIdx) {
FI->setFramePointerSaveIndex(BackChainIdx);
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
unsigned LinkReg = MF.addLiveIn(SystemZ::R14D, &SystemZ::GR64BitRegClass);
if (auto *LoadN = dyn_cast<LoadSDNode>(In))
LoadN->getBasePtr(), LoadN->getMemOperand());
if (Subtarget.hasHighWord()) {
if (Subtarget.hasHighWord())
const unsigned NumFields = 4;
for (unsigned I = 0; I < NumFields; ++I) {
MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
SDValue SystemZTargetLowering::
uint64_t AlignVal = (RealignOpt ?
uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
if (ExtraAlignSpace)
if (RequiredAlign > StackAlign) {
SDValue Ops[2] = { Result, Chain };
SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(
else if (Subtarget.hasMiscellaneousExtensions2())
LL, RL, Ops[1], Ops[0]);
if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue();
unsigned BaseOp = 0;
unsigned CCValid = 0;
unsigned CCMask = 0;
unsigned BaseOp = 0;
unsigned CCValid = 0;
unsigned CCMask = 0;
unsigned NumSignificantBits = (~Known.Zero).getActiveBits();
if (NumSignificantBits == 0)
int64_t BitSize = (int64_t)1 << Log2_32_Ceil(NumSignificantBits);
BitSize = std::min(BitSize, OrigBitSize);
for (int64_t I = BitSize / 2; I >= 8; I = I / 2) {
if (BitSize != OrigBitSize)
DAG.getConstant(((uint64_t)1 << BitSize) - 1, DL, VT));
cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
auto *Node = cast<AtomicSDNode>(Op.getNode());
Node->getChain(), Node->getBasePtr(),
Node->getMemoryVT(), Node->getMemOperand());
auto *Node = cast<AtomicSDNode>(Op.getNode());
Node->getBasePtr(), Node->getMemoryVT(),
Node->getMemOperand());
unsigned Opcode) const {
auto *Node = cast<AtomicSDNode>(Op.getNode());
EVT NarrowVT = Node->getMemoryVT();
if (NarrowVT == WideVT)
int64_t BitSize = NarrowVT.getSizeInBits();
SDValue ChainIn = Node->getChain();
SDValue Addr = Node->getBasePtr();
SDValue Src2 = Node->getVal();
if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
auto *Node = cast<AtomicSDNode>(Op.getNode());
EVT MemVT = Node->getMemoryVT();
SDValue Src2 = Node->getVal();
if (auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) {
int64_t Value = (-Op2->getAPIntValue()).getSExtValue();
if (isInt<32>(Value) || Subtarget.hasInterlockedAccess1())
} else if (Subtarget.hasInterlockedAccess1())
Node->getChain(), Node->getBasePtr(), NegSrc2,
Node->getMemOperand());
auto *Node = cast<AtomicSDNode>(Op.getNode());
EVT NarrowVT = Node->getMemoryVT();
if (NarrowVT == WideVT) {
SDValue Ops[] = { ChainIn, Addr, CmpVal, SwapVal };
DL, Tys, Ops, NarrowVT, MMO);
SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
NegBitShift, DAG.getConstant(BitSize, DL, WideVT) };
VTList, Ops, NarrowVT, MMO);
if (StoreBackchain) {
Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R15D, NewSP);
bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
auto *Node = cast<MemIntrinsicSDNode>(Op.getNode());
Node->getMemoryVT(), Node->getMemOperand());
SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
unsigned Opcode, CCValid;
SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
unsigned Opcode, CCValid;
unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
return lowerThreadPointer(SDLoc(Op), DAG);
{ 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
{ 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
{ 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
{ 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
{ 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
{ 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
{ 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
{ 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
{ 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
{ 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
{ 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
{ 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
{ 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
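// Each row above describes one fixed permute instruction as a byte selection:
// the two v16i8 inputs are viewed as a single 32-byte value (bytes 0-15 from
// the first operand, 16-31 from the second), and the row lists which source
// byte supplies each of the 16 result bytes. The shuffle lowering first tries
// to match a requested byte permutation against these forms and only falls
// back to a general permute (VPERM) when none of them fits.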
OpNo0 = OpNo1 = OpNos[1];
} else if (OpNos[1] < 0) {
OpNo0 = OpNo1 = OpNos[0];
unsigned &OpNo0, unsigned &OpNo1) {
int OpNos[] = { -1, -1 };
if ((Elt ^ P.Bytes[I]) & (SystemZ::VectorBytes - 1))
if (OpNos[ModelOpNo] == 1 - RealOpNo)
OpNos[ModelOpNo] = RealOpNo;
unsigned &OpNo0, unsigned &OpNo1) {
for (auto &P : PermuteForms)
int Elt = Bytes[From];
Transform[From] = -1;
while (P.Bytes[To] != Elt) {
if (To == SystemZ::VectorBytes)
Transform[From] = To;
for (auto &P : PermuteForms)
if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(ShuffleOp)) {
Bytes.resize(NumElements * BytesPerElement, -1);
for (unsigned I = 0; I < NumElements; ++I) {
int Index = VSN->getMaskElt(I);
for (unsigned J = 0; J < BytesPerElement; ++J)
Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
isa<ConstantSDNode>(ShuffleOp.getOperand(1))) {
Bytes.resize(NumElements * BytesPerElement, -1);
for (unsigned I = 0; I < NumElements; ++I)
for (unsigned J = 0; J < BytesPerElement; ++J)
Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
unsigned BytesPerElement, int &Base) {
for (unsigned I = 0; I < BytesPerElement; ++I) {
if (Bytes[Start + I] >= 0) {
unsigned Elem = Bytes[Start + I];
if (unsigned(Base) % Bytes.size() + BytesPerElement > Bytes.size())
} else if (unsigned(Base) != Elem - I)
unsigned &StartIndex, unsigned &OpNo0,
int OpNos[] = { -1, -1 };
for (unsigned I = 0; I < 16; ++I) {
Shift = ExpectedShift;
else if (Shift != ExpectedShift)
if (OpNos[ModelOpNo] == 1 - RealOpNo)
OpNos[ModelOpNo] = RealOpNo;
Op = DAG.getNode(P.Opcode, DL, InVT, Op0, Op1);
for (unsigned I = 0; I < 2; ++I)
unsigned StartIndex, OpNo0, OpNo1;
struct GeneralShuffle {
GeneralShuffle(EVT vt) : VT(vt) {}
void GeneralShuffle::addUndef() {
for (unsigned I = 0; I < BytesPerElement; ++I)
Bytes.push_back(-1);
if (FromBytesPerElement < BytesPerElement)
(FromBytesPerElement - BytesPerElement));
for (; OpNo < Ops.size(); ++OpNo)
if (Ops[OpNo] == Op)
if (OpNo == Ops.size())
for (unsigned I = 0; I < BytesPerElement; ++I)
Bytes.push_back(Base + I);
if (Ops.size() == 0)
if (Ops.size() == 1)
unsigned Stride = 1;
for (; Stride * 2 < Ops.size(); Stride *= 2) {
for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) {
SDValue SubOps[] = { Ops[I], Ops[I + Stride] };
else if (OpNo == I + Stride)
NewBytes[J] = SystemZ::VectorBytes + Byte;
if (NewBytes[J] >= 0) {
assert(unsigned(NewBytesMap[J]) < SystemZ::VectorBytes &&
"Invalid double permute");
Bytes[J] = I * SystemZ::VectorBytes + NewBytesMap[J];
assert(NewBytesMap[J] < 0 && "Invalid double permute");
if (NewBytes[J] >= 0)
Bytes[J] = I * SystemZ::VectorBytes + J;
Ops[1] = Ops[Stride];
if (Bytes[I] >= int(SystemZ::VectorBytes))
Bytes[I] -= (Stride - 1) * SystemZ::VectorBytes;
unsigned OpNo0, OpNo1;
Value = cast<ConstantSDNode>(Op)->getZExtValue();
Value = (cast<ConstantFPSDNode>(Op)->getValueAPF().bitcastToAPInt()
for (unsigned J = 0; J < BytesPerElement; ++J) {
uint64_t Byte = (Value >> (J * 8)) & 0xff;
Mask |= 1ULL << ((E - I - 1) * BytesPerElement + J);
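// The loop above packs the constant elements into a per-byte mask: a bit is
// set only for bytes equal to 0xff, the form required by VECTOR GENERATE
// BYTE MASK (VGBM); any byte that is neither 0x00 nor 0xff forces the caller
// to fall back to the other BUILD_VECTOR strategies.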
unsigned BitsPerElement) {
int64_t SignedValue = SignExtend64(Value, BitsPerElement);
unsigned Start, End;
if (TII->isRxSBGMask(Value, BitsPerElement, Start, End)) {
Start -= 64 - BitsPerElement;
End -= 64 - BitsPerElement;
GeneralShuffle GS(VT);
bool FoundOne = false;
for (unsigned I = 0; I < NumElements; ++I) {
unsigned Elem = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
if (!ResidueOps.empty()) {
while (ResidueOps.size() < NumElements)
for (auto &Op : GS.Ops) {
return GS.getNode(DAG, SDLoc(BVN));
unsigned int NumElements = Elems.size();
unsigned int Count = 0;
for (auto Elem : Elems) {
if (!Elem.isUndef()) {
else if (Elem != Single) {
bool AllLoads = true;
for (auto Elem : Elems)
if (Elem.getOpcode() != ISD::LOAD || cast<LoadSDNode>(Elem)->isIndexed()) {
return joinDwords(DAG, DL, Elems[0], Elems[1]);
unsigned NumConstants = 0;
for (unsigned I = 0; I < NumElements; ++I) {
Constants[I] = Elem;
if (NumConstants > 0) {
for (unsigned I = 0; I < NumElements; ++I)
if (!Constants[I].getNode())
Constants[I] = DAG.getUNDEF(Elems[I].getValueType());
std::map<const SDNode*, unsigned> UseCounts;
SDNode *LoadMaxUses = nullptr;
for (unsigned I = 0; I < NumElements; ++I)
cast<LoadSDNode>(Elems[I])->isUnindexed()) {
SDNode *Ld = Elems[I].getNode();
if (LoadMaxUses == nullptr || UseCounts[LoadMaxUses] < UseCounts[Ld])
if (LoadMaxUses != nullptr) {
ReplicatedVal = SDValue(LoadMaxUses, 0);
unsigned I1 = NumElements / 2 - 1;
unsigned I2 = NumElements - 1;
bool Def1 = !Elems[I1].isUndef();
bool Def2 = !Elems[I2].isUndef();
SDValue Elem1 = Elems[Def1 ? I1 : I2];
SDValue Elem2 = Elems[Def2 ? I2 : I1];
for (unsigned I = 0; I < NumElements; ++I)
if (!Done[I] && !Elems[I].isUndef() && Elems[I] != ReplicatedVal)
auto *BVN = cast<BuildVectorSDNode>(Op.getNode());
if (BVN->isConstant()) {
APInt SplatBits, SplatUndef;
unsigned SplatBitSize;
if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
SplatBitSize <= 64) {
uint64_t Lower = (SplatUndefZ
uint64_t Upper = (SplatUndefZ
& ~((uint64_t(1) << findLastSet(SplatBitsZ)) - 1));
uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
Value = SplatBitsZ | Middle;
for (unsigned I = 0; I < NumElements; ++I)
auto *VSN = cast<ShuffleVectorSDNode>(Op.getNode());
if (VSN->isSplat()) {
unsigned Index = VSN->getSplatIndex();
"Splat index should be defined and in first operand");
GeneralShuffle GS(VT);
for (unsigned I = 0; I < NumElements; ++I) {
int Elt = VSN->getMaskElt(I);
else if (!GS.add(Op.getOperand(unsigned(Elt) / NumElements),
return GS.getNode(DAG, SDLoc(VSN));
SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
uint64_t Index = cast<ConstantSDNode>(Op2)->getZExtValue();
SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
if (auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) {
uint64_t Index = CIndexN->getZExtValue();
unsigned UnpackHigh) const {
PackedOp = DAG.getNode(UnpackHigh, SDLoc(PackedOp), OutVT, PackedOp);
} while (FromBits != ToBits);
unsigned ByScalar) const {
if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) {
APInt SplatBits, SplatUndef;
unsigned SplatBitSize;
if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
ElemBitSize, true) &&
SplatBitSize == ElemBitSize) {
return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
SDValue Splat = BVN->getSplatValue(&UndefElements);
return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) {
if (VSN->isSplat()) {
unsigned Index = VSN->getSplatIndex();
"Splat index should be defined and in first operand");
return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
return lowerFRAMEADDR(Op, DAG);
return lowerRETURNADDR(Op, DAG);
return lowerBR_CC(Op, DAG);
return lowerSELECT_CC(Op, DAG);
return lowerSETCC(Op, DAG);
return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG);
return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG);
return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG);
return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
return lowerBITCAST(Op, DAG);
return lowerVASTART(Op, DAG);
return lowerVACOPY(Op, DAG);
return lowerDYNAMIC_STACKALLOC(Op, DAG);
return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
return lowerSMUL_LOHI(Op, DAG);
return lowerUMUL_LOHI(Op, DAG);
return lowerSDIVREM(Op, DAG);
return lowerUDIVREM(Op, DAG);
return lowerXALUO(Op, DAG);
return lowerADDSUBCARRY(Op, DAG);
return lowerOR(Op, DAG);
return lowerCTPOP(Op, DAG);
return lowerATOMIC_FENCE(Op, DAG);
return lowerATOMIC_STORE(Op, DAG);
return lowerATOMIC_LOAD(Op, DAG);
return lowerATOMIC_LOAD_SUB(Op, DAG);
return lowerATOMIC_CMP_SWAP(Op, DAG);
return lowerSTACKSAVE(Op, DAG);
return lowerSTACKRESTORE(Op, DAG);
return lowerPREFETCH(Op, DAG);
return lowerINTRINSIC_W_CHAIN(Op, DAG);
return lowerINTRINSIC_WO_CHAIN(Op, DAG);
return lowerBUILD_VECTOR(Op, DAG);
return lowerVECTOR_SHUFFLE(Op, DAG);
return lowerSCALAR_TO_VECTOR(Op, DAG);
return lowerINSERT_VECTOR_ELT(Op, DAG);
return lowerEXTRACT_VECTOR_ELT(Op, DAG);
if (cast<AtomicSDNode>(N)->getOrdering() ==
#define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const {
if (!Subtarget.hasVector())
SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT,
BytesPerElement, First))
if (Byte % BytesPerElement != 0)
Index = Byte / BytesPerElement;
if (OpBytesPerElement < BytesPerElement)
unsigned End = (Index + 1) * BytesPerElement;
if (End % OpBytesPerElement != 0)
Op = Op.getOperand(End / OpBytesPerElement - 1);
unsigned Byte = Index * BytesPerElement;
unsigned SubByte = Byte % ExtBytesPerElement;
unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement;
if (SubByte < MinSubByte ||
SubByte + BytesPerElement > ExtBytesPerElement)
Byte = Byte / ExtBytesPerElement * OpBytesPerElement;
Byte += SubByte - MinSubByte;
if (Byte % BytesPerElement != 0)
Index = Byte / BytesPerElement;
SDValue SystemZTargetLowering::combineTruncateExtract(
if (canTreatAsByteVector(VecVT)) {
if (auto *IndexN = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
if (BytesPerElement % TruncBytes == 0) {
unsigned Scale = BytesPerElement / TruncBytes;
unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1;
EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI, true);
SDValue SystemZTargetLowering::combineZERO_EXTEND(
if (TrueOp && FalseOp) {
SDValue SystemZTargetLowering::combineSIGN_EXTEND_INREG(
SDValue SystemZTargetLowering::combineSIGN_EXTEND(
if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) {
if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
Inner.getOperand(0));
SDValue SystemZTargetLowering::combineMERGE(
cast<ConstantSDNode>(Op0.getOperand(0))->getZExtValue() == 0) {
if (ElemBytes <= 4) {
SDValue SystemZTargetLowering::combineLOAD(
else if (UI.getUse().getResNo() == 0)
if (!Replicate || OtherUses.empty())
for (SDNode *U : OtherUses) {
SDValue SystemZTargetLowering::combineSTORE(
auto *SN = cast<StoreSDNode>(N);
EVT MemVT = SN->getMemoryVT();
if (MemVT.isInteger() && SN->isTruncatingStore()) {
combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) {
SN->getBasePtr(), SN->getMemoryVT(),
SN->getMemOperand());
if (!SN->isTruncatingStore() &&
Op1.getNode()->hasOneUse() &&
Ops, MemVT, SN->getMemOperand());
SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
if (!Subtarget.hasVector())
if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
IndexN->getZExtValue(), DCI, false);
SDValue SystemZTargetLowering::combineJOIN_DWORDS(
SDValue SystemZTargetLowering::combineFP_ROUND(
cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
for (auto *U : Vec->uses()) {
U->getOperand(0) == Vec &&
cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 1) {
SDValue SystemZTargetLowering::combineFP_EXTEND(
cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
for (auto *U : Vec->uses()) {
U->getOperand(0) == Vec &&
cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 2) {
SDValue SystemZTargetLowering::combineBSWAP(
bool Invert = false;
if (CompareRHS->getZExtValue() == FalseVal->getZExtValue())
else if (CompareRHS->getZExtValue() != TrueVal->getZExtValue())
if (!NewCCValid || !NewCCMask)
CCMask = NewCCMask->getZExtValue();
if (CompareLHS->getOpcode() == ISD::SRA) {
if (!SRACount || SRACount->getZExtValue() != 30)
auto *SHL = CompareLHS->getOperand(0).getNode();
auto *IPM = SHL->getOperand(0).getNode();
if (!CompareLHS->hasOneUse())
if (CompareRHS->getZExtValue() != 0)
default: return false;
CCReg = IPM->getOperand(0);
SDValue SystemZTargetLowering::combineBR_CCMASK(
SDNode *N, DAGCombinerInfo &DCI) const {
if (!CCValid || !CCMask)
int CCValidVal = CCValid->getZExtValue();
int CCMaskVal = CCMask->getZExtValue();
SDValue SystemZTargetLowering::combineSELECT_CCMASK(
SDNode *N, DAGCombinerInfo &DCI) const {
if (!CCValid || !CCMask)
int CCValidVal = CCValid->getZExtValue();
int CCMaskVal = CCMask->getZExtValue();
SDValue SystemZTargetLowering::combineGET_CCMASK(
SDNode *N, DAGCombinerInfo &DCI) const {
if (!CCValid || !CCMask)
int CCValidVal = CCValid->getZExtValue();
int CCMaskVal = CCMask->getZExtValue();
if (!SelectCCValid || !SelectCCMask)
int SelectCCValidVal = SelectCCValid->getZExtValue();
int SelectCCMaskVal = SelectCCMask->getZExtValue();
if (!TrueVal || !FalseVal)
if (TrueVal->getZExtValue() != 0 && FalseVal->getZExtValue() == 0)
else if (TrueVal->getZExtValue() == 0 && FalseVal->getZExtValue() != 0)
SelectCCMaskVal ^= SelectCCValidVal;
if (SelectCCValidVal & ~CCValidVal)
if (SelectCCMaskVal != (CCMaskVal & SelectCCValidVal))
SDValue SystemZTargetLowering::combineIntDIVREM(
SDNode *N, DAGCombinerInfo &DCI) const {
case ISD::LOAD: return combineLOAD(N, DCI);
case ISD::STORE: return combineSTORE(N, DCI);
case ISD::BSWAP: return combineBSWAP(N, DCI);
case ISD::UREM: return combineIntDIVREM(N, DCI);
unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
SrcDemE = DemandedElts;
SrcDemE = SrcDemE.trunc(NumElts / 2);
SrcDemE = APInt(NumElts * 2, 0);
SrcDemE = APInt(NumElts * 2, 0);
SrcDemE = APInt(NumElts, 0);
if (!DemandedElts[OpNo - 1])
unsigned Mask = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
unsigned MaskBit = ((OpNo - 1) ? 1 : 4);
SrcDemE.setBit((Mask & MaskBit) ? 1 : 0);
unsigned FirstIdx = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
assert(FirstIdx > 0 && FirstIdx < 16 && "Unused operand.");
unsigned NumSrc0Els = 16 - FirstIdx;
SrcDemE = APInt(NumElts, 0);
APInt DemEls = DemandedElts.trunc(NumSrc0Els);
APInt DemEls = DemandedElts.lshr(NumSrc0Els);
SrcDemE = APInt(NumElts, 1);
SrcDemE = APInt(1, 1);
SrcDemE = DemandedElts;
const APInt &DemandedElts,
Known.One = LHSKnown.One & RHSKnown.One;
const APInt &DemandedElts,
unsigned Depth) const {
unsigned tmp0, tmp1;
"KnownBits does not match VT in bitwidth");
"DemandedElts does not match VT number of elements");
unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
Known = Known.zext(BitWidth);
Known = Known.sext(BitWidth);
if (Known.getBitWidth() < BitWidth && isa<ConstantSDNode>(SrcOp))
Known = Known.sext(BitWidth);
if (LHS == 1) return 1;
if (RHS == 1) return 1;
unsigned Common = std::min(LHS, RHS);
if (SrcBitWidth > VTBits) {
unsigned SrcExtraBits = SrcBitWidth - VTBits;
if (Common > SrcExtraBits)
return (Common - SrcExtraBits);
assert(SrcBitWidth == VTBits && "Expected operands of same bitwidth.");
unsigned Depth) const {
unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
if (miI == MBB->end()) {
if ((*SI)->isLiveIn(SystemZ::CC))
case SystemZ::Select32:
case SystemZ::Select64:
case SystemZ::SelectF32:
case SystemZ::SelectF64:
case SystemZ::SelectF128:
case SystemZ::SelectVR32:
case SystemZ::SelectVR64:
case SystemZ::SelectVR128:
unsigned CCValid = MIItBegin->getOperand(3).getImm();
unsigned CCMask = MIItBegin->getOperand(4).getImm();
DebugLoc DL = MIItBegin->getDebugLoc();
unsigned DestReg = MIIt->getOperand(0).getReg();
unsigned TrueReg = MIIt->getOperand(1).getReg();
unsigned FalseReg = MIIt->getOperand(2).getReg();
if (MIIt->getOperand(4).getImm() == (CCValid ^ CCMask))
if (RegRewriteTable.find(TrueReg) != RegRewriteTable.end())
TrueReg = RegRewriteTable[TrueReg].first;
if (RegRewriteTable.find(FalseReg) != RegRewriteTable.end())
FalseReg = RegRewriteTable[FalseReg].second;
BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(SystemZ::PHI), DestReg)
.addReg(TrueReg).addMBB(TrueMBB)
RegRewriteTable[DestReg] = std::make_pair(TrueReg, FalseReg);
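// When several Select pseudo-instructions are folded into a single branch
// diamond, a later select may consume the result of an earlier one, which by
// now is a PHI in the sink block. The lookups above rewrite such operands to
// the original true/false inputs recorded in RegRewriteTable (.first for the
// true path, .second for the false path) before each new PHI is created.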
NextMIIt->getOperand(3).getImm() == CCValid &&
(NextMIIt->getOperand(4).getImm() == CCMask ||
NextMIIt->getOperand(4).getImm() == (CCValid ^ CCMask))) {
LastMI = &*NextMIIt;
BuildMI(MBB, DL, TII->get(SystemZ::BRC))
StartMBB->erase(MIItBegin, MIItEnd);
unsigned StoreOpcode,
unsigned STOCOpcode,
bool Invert) const {
if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
BuildMI(*MBB, MI, DL, TII->get(STOCOpcode))
BuildMI(MBB, DL, TII->get(SystemZ::BRC))
BuildMI(MBB, DL, TII->get(StoreOpcode))
unsigned BitSize, bool Invert) const {
bool IsSubWord = (BitSize < 32);
&SystemZ::GR32BitRegClass :
&SystemZ::GR64BitRegClass);
unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;
assert(LOpcode && CSOpcode && "Displacement out of range");
unsigned NewVal = (BinOpcode || IsSubWord ?
BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
.addReg(OrigVal).addMBB(StartMBB)
BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
BuildMI(MBB, DL, TII->get(BinOpcode), Tmp).addReg(RotatedOldVal).add(Src2);
BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
.addReg(Tmp).addImm(-1U << (32 - BitSize));
BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp);
BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal)
.addReg(Tmp2).addImm(-1);
} else if (BinOpcode)
BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal)
.addReg(RotatedOldVal)
BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal)
BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
BuildMI(MBB, DL, TII->get(SystemZ::BRC))
unsigned KeepOldMask, unsigned BitSize) const {
bool IsSubWord = (BitSize < 32);
&SystemZ::GR32BitRegClass :
&SystemZ::GR64BitRegClass);
unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;
assert(LOpcode && CSOpcode && "Displacement out of range");
BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
.addReg(OrigVal).addMBB(StartMBB)
BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
BuildMI(MBB, DL, TII->get(CompareOpcode))
.addReg(RotatedOldVal).addReg(Src2);
BuildMI(MBB, DL, TII->get(SystemZ::BRC))
BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal)
.addReg(RotatedOldVal).addReg(Src2)
BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal)
.addReg(RotatedOldVal).addMBB(LoopMBB)
BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
BuildMI(MBB, DL, TII->get(SystemZ::BRC))
SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI,
assert(LOpcode && CSOpcode && "Displacement out of range");
BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
.addReg(OrigOldVal).addMBB(StartMBB)
BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal)
.addReg(OrigCmpVal).addMBB(StartMBB)
BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
.addReg(OrigSwapVal).addMBB(StartMBB)
BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest)
BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal)
BuildMI(MBB, DL, TII->get(SystemZ::CR))
.addReg(Dest).addReg(RetryCmpVal);
BuildMI(MBB, DL, TII->get(SystemZ::BRC))
BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
.addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
BuildMI(MBB, DL, TII->get(SystemZ::BRC))
BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Tmp1);
BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Tmp2)
BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
bool ClearEven) const {
BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
.addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64);
BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
.addReg(In128).addReg(Src).addImm(SystemZ::subreg_l64);
uint64_t StartSrcReg = forceReg(MI, SrcBase, TII);
uint64_t StartDestReg = (HaveSingleBase ? StartSrcReg :
uint64_t ThisDestReg = (HaveSingleBase ? ThisSrcReg :
uint64_t NextDestReg = (HaveSingleBase ? NextSrcReg :
RC = &SystemZ::GR64BitRegClass;
BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg)
.addReg(StartDestReg).addMBB(StartMBB)
if (!HaveSingleBase)
BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg)
.addReg(StartSrcReg).addMBB(StartMBB)
BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg)
.addReg(StartCountReg).addMBB(StartMBB)
BuildMI(MBB, DL, TII->get(SystemZ::PFD))
BuildMI(MBB, DL, TII->get(Opcode))
BuildMI(MBB, DL, TII->get(SystemZ::BRC))
BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg)
if (!HaveSingleBase)
BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg)
BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg)
.addReg(ThisCountReg).addImm(-1);
BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
.addReg(NextCountReg).addImm(0);
BuildMI(MBB, DL, TII->get(SystemZ::BRC))
if (EndMBB && !Length)
while (Length > 0) {
uint64_t ThisLength = std::min(Length, uint64_t(256));
if (!isUInt<12>(DestDisp)) {
if (!isUInt<12>(SrcDisp)) {
BuildMI(*MBB, MI, DL, TII->get(Opcode))
DestDisp += ThisLength;
SrcDisp += ThisLength;
Length -= ThisLength;
if (EndMBB && Length > 0) {
BuildMI(MBB, DL, TII->get(SystemZ::BRC))
BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg)
.addReg(Start1Reg).addMBB(StartMBB)
BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg)
.addReg(Start2Reg).addMBB(StartMBB)
BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg);
BuildMI(MBB, DL, TII->get(Opcode))
BuildMI(MBB, DL, TII->get(SystemZ::BRC))
bool NoFloat) const {
static const unsigned GPRControlBit[16] = {
0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
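// GPRControlBit maps each general-purpose register to its bit in the TBEGIN
// general-register save mask; even/odd registers form pairs and share a bit,
// which is why every value appears twice. The code below uses it to mark the
// clobbered GPRs (and, when the float control bit is set and floating point
// is not disabled, the FP/vector registers) on the transaction-begin
// instruction.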
Control |= GPRControlBit[15];
Control |= GPRControlBit[11];
for (int I = 0; I < 16; I++) {
if ((Control & GPRControlBit[I]) == 0) {
if (!NoFloat && (Control & 4) != 0) {
if (Subtarget.hasVector()) {
for (int I = 0; I < 32; I++) {
for (int I = 0; I < 16; I++) {
BuildMI(*MBB, MI, DL, TII->get(Opcode), DstReg)
case SystemZ::Select32:
case SystemZ::Select64:
case SystemZ::SelectF32:
case SystemZ::SelectF64:
case SystemZ::SelectF128:
case SystemZ::SelectVR32:
case SystemZ::SelectVR64:
case SystemZ::SelectVR128:
return emitSelect(MI, MBB);
case SystemZ::CondStore8Mux:
return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
case SystemZ::CondStore8MuxInv:
return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
case SystemZ::CondStore16Mux:
return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
case SystemZ::CondStore16MuxInv:
return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
case SystemZ::CondStore32Mux:
return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, false);
case SystemZ::CondStore32MuxInv:
return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, true);
case SystemZ::CondStore8:
return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
case SystemZ::CondStore8Inv:
return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
case SystemZ::CondStore16:
return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
case SystemZ::CondStore16Inv:
return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
case SystemZ::CondStore32:
return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
case SystemZ::CondStore32Inv:
return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
case SystemZ::CondStore64:
return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
case SystemZ::CondStore64Inv:
return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
case SystemZ::CondStoreF32:
return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
case SystemZ::CondStoreF32Inv:
return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
case SystemZ::CondStoreF64:
return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
case SystemZ::CondStoreF64Inv:
return emitCondStore(MI, MBB, SystemZ::STD, 0, true);
case SystemZ::PAIR128:
return emitPair128(MI, MBB);
case SystemZ::AEXT128:
return emitExt128(MI, MBB, false);
case SystemZ::ZEXT128:
return emitExt128(MI, MBB, true);
return emitAtomicLoadBinary(MI, MBB, 0, 0);
case SystemZ::ATOMIC_SWAP_32:
return emitAtomicLoadBinary(MI, MBB, 0, 32);
case SystemZ::ATOMIC_SWAP_64:
return emitAtomicLoadBinary(MI, MBB, 0, 64);
case SystemZ::ATOMIC_LOADW_AR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0);
case SystemZ::ATOMIC_LOADW_AFI:
return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0);
case SystemZ::ATOMIC_LOAD_AR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32);
case SystemZ::ATOMIC_LOAD_AHI:
return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32);
case SystemZ::ATOMIC_LOAD_AFI:
return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32);
case SystemZ::ATOMIC_LOAD_AGR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64);
case SystemZ::ATOMIC_LOAD_AGHI:
return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64);
case SystemZ::ATOMIC_LOAD_AGFI:
return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64);
case SystemZ::ATOMIC_LOADW_SR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0);
case SystemZ::ATOMIC_LOAD_SR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32);
case SystemZ::ATOMIC_LOAD_SGR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64);
case SystemZ::ATOMIC_LOADW_NR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
case SystemZ::ATOMIC_LOADW_NILH:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0);
case SystemZ::ATOMIC_LOAD_NR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
case SystemZ::ATOMIC_LOAD_NILL:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32);
case SystemZ::ATOMIC_LOAD_NILH:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32);
case SystemZ::ATOMIC_LOAD_NILF:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32);
case SystemZ::ATOMIC_LOAD_NGR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
case SystemZ::ATOMIC_LOAD_NILL64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64);
case SystemZ::ATOMIC_LOAD_NILH64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64);
case SystemZ::ATOMIC_LOAD_NIHL64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64);
case SystemZ::ATOMIC_LOAD_NIHH64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64);
case SystemZ::ATOMIC_LOAD_NILF64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64);
case SystemZ::ATOMIC_LOAD_NIHF64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64);
return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
case SystemZ::ATOMIC_LOADW_OILH:
return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0);
return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
case SystemZ::ATOMIC_LOAD_OILL:
return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32);
case SystemZ::ATOMIC_LOAD_OILH:
return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32);
case SystemZ::ATOMIC_LOAD_OILF:
return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32);
case SystemZ::ATOMIC_LOAD_OGR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
case SystemZ::ATOMIC_LOAD_OILL64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64);
case SystemZ::ATOMIC_LOAD_OILH64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64);
case SystemZ::ATOMIC_LOAD_OIHL64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64);
case SystemZ::ATOMIC_LOAD_OIHH64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64);
case SystemZ::ATOMIC_LOAD_OILF64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64);
case SystemZ::ATOMIC_LOAD_OIHF64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64);
case SystemZ::ATOMIC_LOADW_XR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
case SystemZ::ATOMIC_LOADW_XILF:
return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0);
case SystemZ::ATOMIC_LOAD_XR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
case SystemZ::ATOMIC_LOAD_XILF:
return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32);
case SystemZ::ATOMIC_LOAD_XGR:
return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
case SystemZ::ATOMIC_LOAD_XILF64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64);
case SystemZ::ATOMIC_LOAD_XIHF64:
return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64);
case SystemZ::ATOMIC_LOADW_NRi:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
case SystemZ::ATOMIC_LOADW_NILHi:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true);
case SystemZ::ATOMIC_LOAD_NRi:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
case SystemZ::ATOMIC_LOAD_NILLi:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true);
case SystemZ::ATOMIC_LOAD_NILHi:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true);
case SystemZ::ATOMIC_LOAD_NILFi:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true);
case SystemZ::ATOMIC_LOAD_NGRi:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
case SystemZ::ATOMIC_LOAD_NILL64i:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true);
case SystemZ::ATOMIC_LOAD_NILH64i:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true);
case SystemZ::ATOMIC_LOAD_NIHL64i:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true);
case SystemZ::ATOMIC_LOAD_NIHH64i:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true);
case SystemZ::ATOMIC_LOAD_NILF64i:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true);
case SystemZ::ATOMIC_LOAD_NIHF64i:
return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true);
return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
case SystemZ::ATOMIC_LOAD_MIN_32:
return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
case SystemZ::ATOMIC_LOAD_MIN_64:
return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
case SystemZ::ATOMIC_LOAD_MAX_32:
return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
case SystemZ::ATOMIC_LOAD_MAX_64:
return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
case SystemZ::ATOMIC_LOAD_UMIN_32:
return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
case SystemZ::ATOMIC_LOAD_UMIN_64:
return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
case SystemZ::ATOMIC_LOAD_UMAX_32:
return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
case SystemZ::ATOMIC_LOAD_UMAX_64:
return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
return emitAtomicCmpSwapW(MI, MBB);
case SystemZ::MVCSequence:
case SystemZ::MVCLoop:
case SystemZ::NCSequence:
case SystemZ::NCLoop:
case SystemZ::OCSequence:
case SystemZ::OCLoop:
case SystemZ::XCSequence:
case SystemZ::XCLoop:
case SystemZ::CLCSequence:
case SystemZ::CLCLoop:
case SystemZ::CLSTLoop:
return emitStringWrapper(MI, MBB, SystemZ::CLST);
case SystemZ::MVSTLoop:
return emitStringWrapper(MI, MBB, SystemZ::MVST);
case SystemZ::SRSTLoop:
return emitStringWrapper(MI, MBB, SystemZ::SRST);
case SystemZ::TBEGIN_nofloat:
case SystemZ::TBEGINC:
return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC, true);
case SystemZ::LTEBRCompare_VecPseudo:
return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTEBR);
case SystemZ::LTDBRCompare_VecPseudo:
return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTDBR);
case SystemZ::LTXBRCompare_VecPseudo:
return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTXBR);
case TargetOpcode::STACKMAP:
case TargetOpcode::PATCHPOINT:
SystemZTargetLowering::getRepRegClassFor(MVT VT) const {
return &SystemZ::ADDR128BitRegClass;
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL, const Permute &P, SDValue Op0, SDValue Op1)
bool isMachineConstantPoolEntry() const
static unsigned computeNumSignBitsBinOp(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth, unsigned OpNo)
static unsigned CCMaskForCondCode(ISD::CondCode CC)
void setFrameAddressIsTaken(bool T)
const unsigned CCMASK_CMP_GT
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
constexpr bool isUInt< 32 >(uint64_t x)
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
static MVT getIntegerVT(unsigned BitWidth)
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
const MachineInstrBuilder & add(const MachineOperand &MO) const
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
BUILTIN_OP_END - This must be the last enum value in this list.
A parsed version of the target data layout string in and methods for querying it. ...
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
EVT getValueType() const
Return the ValueType of the referenced return value.
static bool getShuffleInput(const SmallVectorImpl< int > &Bytes, unsigned Start, unsigned BytesPerElement, int &Base)
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
static void VerifyVectorTypes(const SmallVectorImpl< ISD::InputArg > &Ins)
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
const unsigned CCMASK_ARITH
C - The default llvm calling convention, compatible with C.
T findLastSet(T Val, ZeroBehavior ZB=ZB_Max)
Get the index of the last set bit starting from the least significant bit.
const GlobalValue * getGlobal() const
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant, which is required to be operand #1) half of the integer or float value specified as operand #0.
uint64_t getZExtValue() const
Get zero extended value.
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
const unsigned GR32Regs[16]
static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode, unsigned &CCValid)
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
const int64_t CallFrameSize
SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd)...
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &, EVT) const override
Return the ValueType of the result of SETCC operations.
static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op, bool IsNegative)
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
BR_CC - Conditional branch.
This class represents lattice values for constants.
static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Value)
static const Permute PermuteForms[]
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
const MCPhysReg ArgFPRs[NumArgFPRs]
static MVT getVectorVT(MVT VT, unsigned NumElements)
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0...
const unsigned CCMASK_FCMP
const unsigned FP128Regs[16]
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
unsigned odd128(bool Is32bit)
virtual const TargetRegisterClass * getRepRegClassFor(MVT VT) const
Return the 'representative' register class for the specified value type.
static bool checkCCKill(MachineInstr &MI, MachineBasicBlock *MBB)
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array...
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
bool isVector() const
Return true if this is a vector value type.
void addLiveIn(unsigned Reg, unsigned vreg=0)
addLiveIn - Add the specified register as a live-in.
const SDValue & getBasePtr() const
static bool isImmHH(uint64_t Val)
static SDValue tryBuildVectorShuffle(SelectionDAG &DAG, BuildVectorSDNode *BVN)
unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
Determine the number of bits in the operation that are sign bits.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
void push_back(const T &Elt)
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
unsigned getReg() const
getReg - Returns the register number.
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
STACKRESTORE has two operands, an input chain and a pointer to restore to. It returns an output chain...
SDVTList getVTList() const
This class represents a function call, abstracting a target machine's calling convention.
unsigned getRegSaveFrameIndex() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
const SDValue & getChain() const
static SDValue tryBuildVectorReplicate(SelectionDAG &DAG, const SystemZInstrInfo *TII, const SDLoc &DL, EVT VT, uint64_t Value, unsigned BitsPerElement)
static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode, SDValue Call, unsigned CCValid, uint64_t CC, ISD::CondCode Cond)
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
static std::pair< unsigned, const TargetRegisterClass * > parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC, const unsigned *Map, unsigned Size)
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
static bool canUseSiblingCall(const CCState &ArgCCInfo, SmallVectorImpl< CCValAssign > &ArgLocs, SmallVectorImpl< ISD::OutputArg > &Outs)
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
APInt trunc(unsigned width) const
Truncate to new width.
constexpr bool isInt< 16 >(int64_t x)
bool isLegalAddImmediate(int64_t Imm) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
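One plausible override, sketched under the assumption that the target accepts any add immediate fitting a signed or unsigned 32-bit field; MyTargetLowering is a hypothetical subclass, not a claim about any specific backend:
  // Hypothetical TargetLowering subclass method; the 32-bit rule is an
  // assumption made purely for illustration.
  bool MyTargetLowering::isLegalAddImmediate(int64_t Imm) const {
    return isInt<32>(Imm) || isUInt<32>(Imm);
  }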
const TargetRegisterInfo * TRI
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0, SDValue Op1)
const unsigned FP32Regs[16]
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
SDNode * getNode() const
get the SDNode which holds the desired result
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned char TargetFlags=0)
static void createPHIsForSelects(MachineBasicBlock::iterator MIItBegin, MachineBasicBlock::iterator MIItEnd, MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB, MachineBasicBlock *SinkMBB)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
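For example, a lowering routine might create a plain 64-bit addition node along these lines (DAG, DL, LHS and RHS are assumed to be an in-scope SelectionDAG, SDLoc and two i64 SDValues):
  // Sketch only: builds (or re-uses, via CSE) an ISD::ADD node of type i64.
  SDValue Sum = DAG.getNode(ISD::ADD, DL, MVT::i64, LHS, RHS);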
bool isVectorTy() const
True if this is an instance of VectorType.
void reserve(size_type N)
Value * CallOperandVal
If this is the result output operand or a clobber, this is null, otherwise it is the incoming operand...
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1)
const unsigned CCMASK_CS_EQ
const unsigned CCMASK_LOGICAL_CARRY
SDValue getConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offs=0, bool isT=false, unsigned char TargetFlags=0)
unsigned getBitWidth() const
Get the bit width of this value.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::size_t countLeadingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the most significant bit to the least stopping at the first 1...
static bool shouldSwapCmpOperands(const Comparison &C)
const unsigned CCMASK_ICMP
unsigned getValueSizeInBits() const
Returns the size of the value in bits.
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
TargetLowering::ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
static unsigned getVectorComparison(ISD::CondCode CC, bool IsFP)
int getFramePointerSaveIndex() const
unsigned getBitWidth() const
Return the number of bits in the APInt.
static MachineOperand CreateReg(unsigned Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
const unsigned VR64Regs[32]
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
The address of a basic block.
bool hasOneUse() const
Return true if there is exactly one use of this node.
A description of a memory reference used in the backend.
static bool matchDoublePermute(const SmallVectorImpl< int > &Bytes, const Permute &P, SmallVectorImpl< int > &Transform)
void setBit(unsigned BitPosition)
Set a given bit to 1.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
static MachineBasicBlock * splitBlockAfter(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
bool isFloatingPointTy() const
Return true if this is one of the six floating-point types.
Shift and rotation operations.
static bool isShlDoublePermute(const SmallVectorImpl< int > &Bytes, unsigned &StartIndex, unsigned &OpNo0, unsigned &OpNo1)
virtual bool hasFP(const MachineFunction &MF) const =0
hasFP - Return true if the specified function should have a dedicated frame pointer register...
const unsigned CCMASK_TM_MSB_0
void setVarArgsFirstGPR(unsigned GPR)
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
bool isIntegerTy() const
True if this is an instance of IntegerType.
unsigned getScalarValueSizeInBits() const
uint64_t getConstantOperandVal(unsigned i) const
unsigned getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt) For double-word atomic operations: ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amtLo, amtHi) ValLo, ValHi, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amtLo, amtHi) These correspond to the atomicrmw instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
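A hedged sketch of how such entries are typically registered in a TargetLowering subclass constructor (the operations and types here are illustrative only):
  // Fragment assumed to sit inside a TargetLowering subclass constructor.
  setOperationAction(ISD::SDIV,  MVT::i32, Expand);  // use the generic expansion
  setOperationAction(ISD::CTPOP, MVT::i64, Custom);  // route through LowerOperation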
const DataLayout & getDataLayout() const
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend, SDValue Op0, SDValue Op1, SDValue &Hi, SDValue &Lo)
const BlockAddress * getBlockAddress() const
LocInfo getLocInfo() const
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
KnownBits zext(unsigned BitWidth)
Zero extends the underlying known Zero and One bits.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SmallVector< ISD::InputArg, 32 > Ins
AtomicOrdering
Atomic ordering for LLVM's memory model.
STACKSAVE - STACKSAVE has one operand, an input chain.
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
static bool isScalarToVector(SDValue Op)
Fast - This calling convention attempts to make calls as fast as possible (e.g.
unsigned getScalarSizeInBits() const
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Type * getType() const
All values are typed, get the type of this value.
MachineFunction & getMachineFunction() const
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
SDValue getRegisterMask(const uint32_t *RegMask)
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
const TargetMachine & getTarget() const
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
static SDNode * emitIntrinsicWithCC(SelectionDAG &DAG, SDValue Op, unsigned Opcode)
bool isLegalICmpImmediate(int64_t Imm) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
This contains information for each constraint that we are lowering.
Simple integer binary arithmetic operators.
const unsigned CCMASK_TBEGIN
SmallVector< ISD::OutputArg, 32 > Outs
static AddressingMode getLoadStoreAddrMode(bool HasVector, Type *Ty)
const unsigned NumArgFPRs
static bool matchPermute(const SmallVectorImpl< int > &Bytes, const Permute &P, unsigned &OpNo0, unsigned &OpNo1)
void incNumLocalDynamicTLSAccesses()
KnownBits zextOrTrunc(unsigned BitWidth)
Zero extends or truncates the underlying known Zero and One bits.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
static mvt_range integer_vector_valuetypes()
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
const unsigned CCMASK_ANY
virtual const TargetInstrInfo * getInstrInfo() const
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
const unsigned VectorBits
bool isTruncateFree(Type *, Type *) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
ArrayRef< SDUse > ops() const
const unsigned CCMASK_CS_NE
const unsigned CCMASK_TM_SOME_0
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
Value * getOperand(unsigned i) const
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
const unsigned CCMASK_TM_ALL_1
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
UNDEF - An undefined node.
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the specified, possibly variable...
TargetInstrInfo - Interface to description of machine instruction set.
iterator find(const_arg_type_t< KeyT > Val)
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits...
SystemZTargetLowering(const TargetMachine &TM, const SystemZSubtarget &STI)
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
const unsigned CCMASK_LOGICAL
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
const unsigned CCMASK_LOGICAL_BORROW
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
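A small sketch of the builder interface (MF, DL, TII, DstReg and SrcReg are assumed to be in scope; TargetOpcode::COPY is just a convenient generic opcode for illustration):
  // Create a stand-alone COPY instruction: DstReg is defined, SrcReg is used.
  MachineInstrBuilder MIB =
      BuildMI(MF, DL, TII->get(TargetOpcode::COPY))
          .addReg(DstReg, RegState::Define)
          .addReg(SrcReg);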
void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
static void adjustForLTGFR(Comparison &C)
virtual const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const
Return a mask of call-preserved registers for the given calling convention on the current function...
MachineInstrBundleIterator< MachineInstr > iterator
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
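As an illustration, reversing the bytes of a v16i8 value can be expressed as a shuffle against UNDEF (DAG, DL and V are assumed to be in scope):
  // Sketch: byte reversal via ISD::VECTOR_SHUFFLE.
  int Mask[16];
  for (int I = 0; I < 16; ++I)
    Mask[I] = 15 - I;
  SDValue Rev = DAG.getVectorShuffle(MVT::v16i8, DL, V,
                                     DAG.getUNDEF(MVT::v16i8), Mask);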
int64_t getOffset() const
void addLiveIn(MCPhysReg PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
constexpr bool isUInt< 8 >(uint64_t x)
const MachineRegisterInfo * MRI
std::size_t countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1...
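A minimal sketch of the two zero-counting helpers on a 32-bit value (header path assumed to be llvm/Support/MathExtras.h):
  #include "llvm/Support/MathExtras.h"
  #include <cassert>

  void zeroCountDemo() {
    unsigned V = 0x8;                           // 0b1000
    assert(llvm::countTrailingZeros(V) == 3);   // zeros below the first set bit
    assert(llvm::countLeadingZeros(V) == 28);   // zeros above it, in 32 bits
  }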
bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
const unsigned CCMASK_VCMP
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
The instances of the Type class are immutable: once they are created, they are never changed...
This is an important class for using LLVM in a threaded context.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
Simple binary floating point operators.
void setTargetDAGCombine(ISD::NodeType NT)
Targets should invoke this method for each target independent node that they want to provide a custom...
bool registerDefIsDead(unsigned Reg, const TargetRegisterInfo *TRI=nullptr) const
Returns true if the register is dead in this machine instruction.
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType)
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
This is an important base class in LLVM.
void resetAll()
Resets the known state of all bits.
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE...
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
unsigned getOpcodeForOffset(unsigned Opcode, int64_t Offset) const
const SDValue & getOperand(unsigned Num) const
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In)
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL...
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
const unsigned CCMASK_CMP_LE
static SDValue buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SmallVectorImpl< SDValue > &Elems)
static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
static bool getVPermMask(SDValue ShuffleOp, SmallVectorImpl< int > &Bytes)
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able to emit the call instruction as a tail call.
void AddToWorklist(SDNode *N)
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
static mvt_range fp_valuetypes()
This class provides iterator support for SDUse operands that use a specific SDNode.
static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL, SDValue Op)
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
void setRegSaveFrameIndex(unsigned FI)
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
void setImm(int64_t immVal)
const unsigned CCMASK_TM_MSB_1
TRAP - Trapping instruction.
bool definesRegister(unsigned Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr fully defines the specified register.
const unsigned CCMASK_CMP_LT
static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Op0, SDValue Op1)
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
void setPrefFunctionAlignment(unsigned Align)
Set the target's preferred function alignment.
static mvt_range vector_valuetypes()
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align=0, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, unsigned Size=0)
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y)...
const unsigned CCMASK_TDC
static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, ISD::CondCode Cond, const SDLoc &DL)
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
static unsigned forceReg(MachineInstr &MI, MachineOperand &Base, const SystemZInstrInfo *TII)
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo...
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
succ_iterator succ_begin()
unsigned getAlignment() const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
static SDValue convertLocVTToValVT(SelectionDAG &DAG, const SDLoc &DL, CCValAssign &VA, SDValue Chain, SDValue Value)
This structure contains all information that is necessary for lowering calls.
static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask)
static bool isImmLH(uint64_t Val)
T findFirstSet(T Val, ZeroBehavior ZB=ZB_Max)
Get the index of the first set bit starting from the least significant bit.
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array...
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, unsigned Align, bool *Fast) const override
Determine if the target supports unaligned memory accesses.
This class contains a discriminated union of information about pointers in memory operands...
unsigned getNumOperands() const
Return the number of values used by this operation.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
bool hasVectorEnhancements1() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
AddressingMode(bool LongDispl, bool IdxReg)
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
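A short sketch contrasting the copying and in-place logical right shifts (header path assumed to be llvm/ADT/APInt.h):
  #include "llvm/ADT/APInt.h"
  #include <cassert>

  void apintShiftDemo() {
    llvm::APInt X(16, 0x00F0);      // 16-bit value 0b0000000011110000
    assert(X.lshr(4) == 0x000F);    // returns a shifted copy, zero-filled
    X.lshrInPlace(4);               // modifies X itself
    assert(X == 0x000F);
  }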
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands...
SDValue CreateStackTemporary(EVT VT, unsigned minAlign=1)
Create a stack temporary, suitable for holding the specified value type.
const unsigned CCMASK_TM_MIXED_MSB_0
bool allowTruncateForTailCall(Type *, Type *) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
void setIsKill(bool Val=true)
unsigned getPointerSize(unsigned AS) const
Get the pointer size for this target.
SDValue getTargetConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offset=0, unsigned char TargetFlags=0)
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned char TargetFlags=0)
static bool isUndef(ArrayRef< int > Mask)
TokenFactor - This node takes multiple tokens as input and produces a single token result...
const TargetLowering & getTargetLoweringInfo() const
Iterator for intrusive lists based on ilist_node.
static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts, unsigned OpNo)
CCState - This class holds information needed while lowering arguments and return values...
TargetLowering::ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
void setDesc(const MCInstrDesc &tid)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one...
constexpr bool isInt< 32 >(int64_t x)
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
unsigned getVarArgsFrameIndex() const
void setVarArgsFrameIndex(unsigned FI)
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
unsigned getVarArgsFirstGPR() const
EVT getVectorElementType() const
Given a vector type, return the type of each element.
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
static SDNode * emitIntrinsicWithCCAndChain(SelectionDAG &DAG, SDValue Op, unsigned Opcode)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
MachineOperand class - Representation of each machine instruction operand.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
const unsigned CCMASK_CMP_EQ
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
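For instance, a v4i32 splat of the constant 1 could be materialized like this (DAG and DL assumed in scope; the type and value are arbitrary choices for illustration):
  // Sketch: BUILD_VECTOR of four identical i32 constants.
  SDValue One = DAG.getConstant(1, DL, MVT::i32);
  SDValue Ops[] = {One, One, One, One};
  SDValue Splat = DAG.getBuildVector(MVT::v4i32, DL, Ops);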
CCValAssign - Represent assignment of one arg/retval to a location.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
const unsigned FP64Regs[16]
static bool isSimpleShift(SDValue N, unsigned &ShiftVal)
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
BRCOND - Conditional branch.
Information about stack frame layout on the target.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
static bool tryBuildVectorByteMask(BuildVectorSDNode *BVN, uint64_t &Mask)
static SDValue convertValVTToLocVT(SelectionDAG &DAG, const SDLoc &DL, CCValAssign &VA, SDValue Value)
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
Byte Swap and Counting operators.
const unsigned CCMASK_CMP_O
const unsigned GR128Regs[16]
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
const Constant * getConstVal() const
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const unsigned CCMASK_CMP_NE
Represents one node in the SelectionDAG.
CondCode getSetCCInverse(CondCode Operation, bool isInteger)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In)
const unsigned GR64Regs[16]
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
const Function & getFunction() const
Return the LLVM function that this machine code represents.
static mvt_range integer_valuetypes()
const unsigned CCMASK_TM_MIXED_MSB_1
SDNode * isConstantIntBuildVectorOrConstantInt(SDValue N)
Test whether the given value is a constant int or similar node.
EVT getMemoryVT() const
Return the type of the in-memory value.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
Class for arbitrary precision integers.
CodeModel::Model getCodeModel() const
Returns the code model.
void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
const unsigned VectorBytes
iterator_range< use_iterator > uses()
A "pseudo-class" with methods for operating on BUILD_VECTORs.
Select(COND, TRUEVAL, FALSEVAL).
void setMinFunctionAlignment(unsigned Align)
Set the target's minimum function alignment (in log2(bytes))
bool readsRegister(unsigned Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr reads the specified register.
void setVarArgsFirstFPR(unsigned FPR)
static use_iterator use_end()
ZERO_EXTEND - Used for integer types, zeroing the new bits.
static void VerifyVectorType(MVT VT, EVT ArgVT)
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
ANY_EXTEND - Used for integer types. The high bits are undefined.
const unsigned CCMASK_CMP_GE
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
static MachineOperand earlyUseOperand(MachineOperand Op)
static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg, unsigned CCValid, unsigned CCMask)
const MachineBasicBlock * getParent() const
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL, SDValue *Ops, const SmallVectorImpl< int > &Bytes)
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca...
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
BR_JT - Jumptable branch.
Representation of each machine instruction.
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
bool killsRegister(unsigned Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr kills the specified register.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned char TargetFlags=0)
SmallVector< SDValue, 32 > OutVals
static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg)
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
bool isVector() const
Return true if this is a vector value type.
static MachineBasicBlock * emitBlockAfter(MachineBasicBlock *MBB)
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Bitwise operators - logical and, logical or, logical xor.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
unsigned getLocMemOffset() const
static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
LLVM_NODISCARD bool empty() const
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
static bool is32Bit(EVT VT)
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
int64_t getOffset() const
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
unsigned MaxStoresPerMemcpyOptSize
Maximum number of store operations that may be substituted for a call to memcpy, used for functions w...
void setStackPointerRegisterToSaveRestore(unsigned R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
const unsigned VR32Regs[32]
static bool isSelectPseudo(MachineInstr &MI)
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
static MachineBasicBlock * splitBlockBefore(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
unsigned getOpcode() const
FSINCOS - Compute both fsin and fcos as a single operation.
SDValue getValue(unsigned R) const
unsigned MaxStoresPerMemcpy
Specify the maximum number of store instructions per memcpy call.
constexpr bool isUInt< 16 >(uint64_t x)
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
static unsigned reverseCCMask(unsigned CCMask)
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
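A small sketch of interpreting a 12-bit field as signed, the kind of check that displacement handling relies on (header path assumed to be llvm/Support/MathExtras.h):
  #include "llvm/Support/MathExtras.h"
  #include <cassert>

  void signExtendDemo() {
    // Treat the low 12 bits as a signed field: 0x800 is -2048, 0x7FF is +2047.
    assert(llvm::SignExtend64<12>(0x800) == -2048);
    assert(llvm::SignExtend64<12>(0x7FF) == 2047);
  }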
bool isReg() const
isReg - Tests if this is a MO_Register operand.
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
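A brief sketch of the difference between a generic constant and a target constant (DAG and DL assumed in scope; the value and type are arbitrary):
  // getConstant produces a node the legalizer/combiner may still transform;
  // getTargetConstant is left alone and is meant to be consumed directly as
  // an operand in instruction selection patterns.
  SDValue C  = DAG.getConstant(42, DL, MVT::i32);
  SDValue TC = DAG.getTargetConstant(42, DL, MVT::i32);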
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask, uint64_t Mask, uint64_t CmpVal, unsigned ICmpType)
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, const Value *PtrVal, unsigned Alignment, AtomicOrdering Ordering, SyncScope::ID SSID)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands...
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
MachineConstantPoolValue * getMachineCPVal() const
static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
user_iterator user_begin()
void insert(iterator MBBI, MachineBasicBlock *MBB)
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
bool hasFPExtension() const
void setReturnAddressIsTaken(bool s)
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
unsigned getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVM Value Representation.
FMA - Perform a * b + c with no intermediate rounding step.
SDValue getRegister(unsigned Reg, EVT VT)
unsigned getResNo() const
get the index which selects a specific result in the SDNode
unsigned even128(bool Is32bit)
bool isFPImmLegal(const APFloat &Imm, EVT VT) const override
Returns true if the target can instruction select the specified FP immediate natively.
static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT, unsigned Opcode, SDValue Op0, SDValue Op1, SDValue &Even, SDValue &Odd)
Synchronized with respect to all concurrently executing threads.
unsigned getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
SDValue getValueType(EVT)
KnownBits sext(unsigned BitWidth)
Sign extends the underlying known Zero and One bits.
static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
PREFETCH - This corresponds to a prefetch intrinsic.
static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid)
static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg)
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone...
const unsigned CCMASK_ARITH_OVERFLOW
static bool isImmLL(uint64_t Val)
Primary interface to the complete machine description for the target machine.
A SystemZ-specific constant pool value.
bool isRxSBGMask(uint64_t Mask, unsigned BitSize, unsigned &Start, unsigned &End) const
unsigned getVarArgsFirstFPR() const
bool hasOneUse() const
Return true if there is exactly one user of this value.
StringRef - Represent a constant reference to a string, i.e.
SetCC operator - This evaluates to a true value iff the condition is true.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
unsigned MaxStoresPerMemsetOptSize
Maximum number of store operations that may be substituted for the call to memset, used for functions with OptSize attribute.
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
unsigned getNumOperands() const
const SDValue & getOperand(unsigned i) const
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
static void adjustForFNeg(Comparison &C)
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
static void computeKnownBitsBinOp(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth, unsigned OpNo)
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
unsigned getLocReg() const
const unsigned CCMASK_TM_ALL_0
uint64_t getZExtValue() const
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
TRUNCATE - Completely drop the high bits.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
const unsigned CCMASK_TEND
const MachineOperand & getOperand(unsigned i) const
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
static AddressingMode supportedAddressingMode(Instruction *I, bool HasVector)
Perform various unary floating-point operations inspired by libm.
bool hasPopulationCount() const
const unsigned VR128Regs[32]
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
bool isFMAFasterThanFMulAndFAdd(EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, bool IsFP, bool &Invert)
const SystemZRegisterInfo * getRegisterInfo() const override
LLVMContext * getContext() const
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
const unsigned CCMASK_TM_SOME_1
unsigned createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
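A hedged sketch of pairing this with addLiveIn when wiring an incoming physical argument register to a fresh virtual register (MF is an assumed MachineFunction; the register class and physical register named here are illustrative placeholders):
  // Sketch only: the specific class/register are placeholders.
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned VReg = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
  MRI.addLiveIn(SystemZ::R2D, VReg);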
Carry-using nodes for multiple precision addition and subtraction.
A wrapper class for inspecting calls to intrinsic functions.
const BasicBlock * getParent() const
static SystemZConstantPoolValue * Create(const GlobalValue *GV, SystemZCP::SystemZCPModifier Modifier)
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned char TargetFlags=0)
const unsigned CCMASK_CMP_UO
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that...
This class is used to represent ISD::LOAD nodes.
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary...
static bool isImmHL(uint64_t Val)