49 #define DEBUG_TYPE "X86-isel" 55 #define GET_GLOBALISEL_PREDICATE_BITSET 56 #include "X86GenGlobalISel.inc" 57 #undef GET_GLOBALISEL_PREDICATE_BITSET 73 unsigned getLoadStoreOp(
const LLT &Ty,
const RegisterBank &RB,
unsigned Opc,
74 uint64_t Alignment)
const;
110 const unsigned DstReg,
112 const unsigned SrcReg,
125 bool emitInsertSubreg(
unsigned DstReg,
unsigned SrcReg,
MachineInstr &I,
128 bool emitExtractSubreg(
unsigned DstReg,
unsigned SrcReg,
MachineInstr &I,
141 #define GET_GLOBALISEL_PREDICATES_DECL 142 #include "X86GenGlobalISel.inc" 143 #undef GET_GLOBALISEL_PREDICATES_DECL 145 #define GET_GLOBALISEL_TEMPORARIES_DECL 146 #include "X86GenGlobalISel.inc" 147 #undef GET_GLOBALISEL_TEMPORARIES_DECL 152 #define GET_GLOBALISEL_IMPL 153 #include "X86GenGlobalISel.inc" 154 #undef GET_GLOBALISEL_IMPL 160 TRI(*STI.getRegisterInfo()), RBI(RBI),
162 #include
"X86GenGlobalISel.inc" 165 #include
"X86GenGlobalISel.inc" 174 if (RB.
getID() == X86::GPRRegBankID) {
176 return &X86::GR8RegClass;
178 return &X86::GR16RegClass;
180 return &X86::GR32RegClass;
182 return &X86::GR64RegClass;
184 if (RB.
getID() == X86::VECRRegBankID) {
186 return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
188 return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
190 return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
192 return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
194 return &X86::VR512RegClass;
208 unsigned SubIdx = X86::NoSubRegister;
209 if (RC == &X86::GR32RegClass) {
210 SubIdx = X86::sub_32bit;
211 }
else if (RC == &X86::GR16RegClass) {
212 SubIdx = X86::sub_16bit;
213 }
else if (RC == &X86::GR8RegClass) {
214 SubIdx = X86::sub_8bit;
221 assert(TargetRegisterInfo::isPhysicalRegister(Reg));
222 if (X86::GR64RegClass.
contains(Reg))
223 return &X86::GR64RegClass;
224 if (X86::GR32RegClass.
contains(Reg))
225 return &X86::GR32RegClass;
226 if (X86::GR16RegClass.
contains(Reg))
227 return &X86::GR16RegClass;
229 return &X86::GR8RegClass;
238 const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI,
TRI);
242 const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI,
TRI);
245 if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
246 assert(I.
isCopy() &&
"Generic operators do not allow physical registers");
248 if (DstSize > SrcSize && SrcRegBank.
getID() == X86::GPRRegBankID &&
249 DstRegBank.
getID() == X86::GPRRegBankID) {
255 if (SrcRC != DstRC) {
259 TII.get(TargetOpcode::SUBREG_TO_REG))
263 .
addImm(getSubRegIndex(SrcRC));
272 assert((!TargetRegisterInfo::isPhysicalRegister(SrcReg) || I.
isCopy()) &&
273 "No phys reg on generic operators");
274 assert((DstSize == SrcSize ||
277 (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
278 DstSize <= RBI.getSizeInBits(SrcReg, MRI,
TRI))) &&
279 "Copy with different width?!");
284 if (SrcRegBank.
getID() == X86::GPRRegBankID &&
285 DstRegBank.
getID() == X86::GPRRegBankID && SrcSize > DstSize &&
286 TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
291 if (DstRC != SrcRC) {
302 if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
325 if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
335 "Generic instruction has unexpected implicit operands\n");
346 case TargetOpcode::G_STORE:
347 case TargetOpcode::G_LOAD:
348 return selectLoadStoreOp(I, MRI, MF);
349 case TargetOpcode::G_GEP:
350 case TargetOpcode::G_FRAME_INDEX:
351 return selectFrameIndexOrGep(I, MRI, MF);
352 case TargetOpcode::G_GLOBAL_VALUE:
353 return selectGlobalValue(I, MRI, MF);
354 case TargetOpcode::G_CONSTANT:
355 return selectConstant(I, MRI, MF);
356 case TargetOpcode::G_FCONSTANT:
357 return materializeFP(I, MRI, MF);
358 case TargetOpcode::G_PTRTOINT:
359 case TargetOpcode::G_TRUNC:
360 return selectTruncOrPtrToInt(I, MRI, MF);
361 case TargetOpcode::G_INTTOPTR:
363 case TargetOpcode::G_ZEXT:
364 return selectZext(I, MRI, MF);
365 case TargetOpcode::G_ANYEXT:
366 return selectAnyext(I, MRI, MF);
367 case TargetOpcode::G_ICMP:
368 return selectCmp(I, MRI, MF);
369 case TargetOpcode::G_FCMP:
370 return selectFCmp(I, MRI, MF);
371 case TargetOpcode::G_UADDE:
372 return selectUadde(I, MRI, MF);
373 case TargetOpcode::G_UNMERGE_VALUES:
375 case TargetOpcode::G_MERGE_VALUES:
376 case TargetOpcode::G_CONCAT_VECTORS:
378 case TargetOpcode::G_EXTRACT:
379 return selectExtract(I, MRI, MF);
380 case TargetOpcode::G_INSERT:
381 return selectInsert(I, MRI, MF);
382 case TargetOpcode::G_BRCOND:
383 return selectCondBranch(I, MRI, MF);
384 case TargetOpcode::G_IMPLICIT_DEF:
385 case TargetOpcode::G_PHI:
386 return selectImplicitDefOrPHI(I, MRI);
387 case TargetOpcode::G_SHL:
388 case TargetOpcode::G_ASHR:
389 case TargetOpcode::G_LSHR:
390 return selectShift(I, MRI, MF);
391 case TargetOpcode::G_SDIV:
392 case TargetOpcode::G_UDIV:
393 case TargetOpcode::G_SREM:
394 case TargetOpcode::G_UREM:
395 return selectDivRem(I, MRI, MF);
396 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
397 return selectIntrinsicWSideEffects(I, MRI, MF);
403 unsigned X86InstructionSelector::getLoadStoreOp(
const LLT &Ty,
406 uint64_t Alignment)
const {
407 bool Isload = (Opc == TargetOpcode::G_LOAD);
408 bool HasAVX = STI.hasAVX();
409 bool HasAVX512 = STI.hasAVX512();
410 bool HasVLX = STI.hasVLX();
412 if (Ty == LLT::scalar(8)) {
413 if (X86::GPRRegBankID == RB.
getID())
414 return Isload ? X86::MOV8rm : X86::MOV8mr;
415 }
else if (Ty == LLT::scalar(16)) {
416 if (X86::GPRRegBankID == RB.
getID())
417 return Isload ? X86::MOV16rm : X86::MOV16mr;
418 }
else if (Ty == LLT::scalar(32) || Ty == LLT::pointer(0, 32)) {
419 if (X86::GPRRegBankID == RB.
getID())
420 return Isload ? X86::MOV32rm : X86::MOV32mr;
421 if (X86::VECRRegBankID == RB.
getID())
422 return Isload ? (HasAVX512 ? X86::VMOVSSZrm
423 : HasAVX ? X86::VMOVSSrm : X86::MOVSSrm)
424 : (HasAVX512 ? X86::VMOVSSZmr
425 : HasAVX ? X86::VMOVSSmr : X86::MOVSSmr);
426 }
else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
427 if (X86::GPRRegBankID == RB.
getID())
428 return Isload ? X86::MOV64rm : X86::MOV64mr;
429 if (X86::VECRRegBankID == RB.
getID())
430 return Isload ? (HasAVX512 ? X86::VMOVSDZrm
431 : HasAVX ? X86::VMOVSDrm : X86::MOVSDrm)
432 : (HasAVX512 ? X86::VMOVSDZmr
433 : HasAVX ? X86::VMOVSDmr : X86::MOVSDmr);
436 return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
438 ? X86::VMOVAPSZ128rm_NOVLX
439 : HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
440 : (HasVLX ? X86::VMOVAPSZ128mr
442 ? X86::VMOVAPSZ128mr_NOVLX
443 : HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
445 return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
447 ? X86::VMOVUPSZ128rm_NOVLX
448 : HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
449 : (HasVLX ? X86::VMOVUPSZ128mr
451 ? X86::VMOVUPSZ128mr_NOVLX
452 : HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
455 return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
456 : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
458 : (HasVLX ? X86::VMOVAPSZ256mr
459 : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
462 return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
463 : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
465 : (HasVLX ? X86::VMOVUPSZ256mr
466 : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
470 return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
472 return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
483 "unsupported type.");
485 if (I.
getOpcode() == TargetOpcode::G_GEP) {
489 AM.
Disp =
static_cast<int32_t
>(Imm);
494 }
else if (I.
getOpcode() == TargetOpcode::G_FRAME_INDEX) {
496 AM.
BaseType = X86AddressMode::FrameIndexBase;
504 bool X86InstructionSelector::selectLoadStoreOp(
MachineInstr &I,
509 assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
510 "unexpected instruction");
517 if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
522 unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlignment());
531 if (Opc == TargetOpcode::G_LOAD) {
544 if (Ty == LLT::pointer(0, 64))
546 else if (Ty == LLT::pointer(0, 32))
552 bool X86InstructionSelector::selectFrameIndexOrGep(
MachineInstr &I,
557 assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_GEP) &&
558 "unexpected instruction");
564 unsigned NewOpc =
getLeaOP(Ty, STI);
568 if (Opc == TargetOpcode::G_FRAME_INDEX) {
580 bool X86InstructionSelector::selectGlobalValue(
MachineInstr &I,
584 "unexpected instruction");
587 if (GV->isThreadLocal()) {
597 AM.
GVOpFlags = STI.classifyGlobalReference(GV);
607 if (STI.isPICStyleRIPRel()) {
615 unsigned NewOpc =
getLeaOP(Ty, STI);
626 bool X86InstructionSelector::selectConstant(
MachineInstr &I,
630 "unexpected instruction");
635 if (RBI.getRegBank(DefReg, MRI,
TRI)->getID() != X86::GPRRegBankID)
650 NewOpc = X86::MOV8ri;
653 NewOpc = X86::MOV16ri;
656 NewOpc = X86::MOV32ri;
661 NewOpc = X86::MOV64ri32;
663 NewOpc = X86::MOV64ri;
678 return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||
679 DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&
680 (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);
683 bool X86InstructionSelector::selectTurnIntoCOPY(
688 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
689 !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
698 bool X86InstructionSelector::selectTruncOrPtrToInt(
MachineInstr &I,
702 I.
getOpcode() == TargetOpcode::G_PTRTOINT) &&
703 "unexpected instruction");
716 <<
" input/output on different banks\n");
723 if (!DstRC || !SrcRC)
730 return selectTurnIntoCOPY(I, MRI, DstReg, DstRC, SrcReg, SrcRC);
732 if (DstRB.
getID() != X86::GPRRegBankID)
736 if (DstRC == SrcRC) {
738 SubIdx = X86::NoSubRegister;
739 }
else if (DstRC == &X86::GR32RegClass) {
740 SubIdx = X86::sub_32bit;
741 }
else if (DstRC == &X86::GR16RegClass) {
742 SubIdx = X86::sub_16bit;
743 }
else if (DstRC == &X86::GR8RegClass) {
744 SubIdx = X86::sub_8bit;
749 SrcRC =
TRI.getSubClassWithSubReg(SrcRC, SubIdx);
751 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
752 !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
764 bool X86InstructionSelector::selectZext(
MachineInstr &I,
767 assert((I.
getOpcode() == TargetOpcode::G_ZEXT) &&
"unexpected instruction");
775 assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(32)) &&
776 "8=>32 Zext is handled by tablegen");
777 assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(32)) &&
778 "16=>32 Zext is handled by tablegen");
780 const static struct ZextEntry {
784 bool NeedSubregToReg;
786 {LLT::scalar(8), LLT::scalar(16), X86::MOVZX16rr8,
false},
787 {LLT::scalar(8), LLT::scalar(64), X86::MOVZX32rr8,
true},
788 {LLT::scalar(16), LLT::scalar(64), X86::MOVZX32rr16,
true},
789 {LLT::scalar(32), LLT::scalar(64), 0,
true}
794 [SrcTy, DstTy](
const ZextEntry &El) {
795 return El.DstTy == DstTy && El.SrcTy == SrcTy;
799 if (ZextEntryIt !=
std::end(OpTable)) {
805 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
806 !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
812 unsigned TransitRegTo = DstReg;
813 unsigned TransitRegFrom = SrcReg;
814 if (ZextEntryIt->MovOp) {
817 if (ZextEntryIt->NeedSubregToReg) {
820 TransitRegTo = TransitRegFrom;
824 .addDef(TransitRegTo)
827 if (ZextEntryIt->NeedSubregToReg) {
829 TII.get(TargetOpcode::SUBREG_TO_REG))
839 if (SrcTy != LLT::scalar(1))
843 if (DstTy == LLT::scalar(8))
844 AndOpc = X86::AND8ri;
845 else if (DstTy == LLT::scalar(16))
846 AndOpc = X86::AND16ri8;
847 else if (DstTy == LLT::scalar(32))
848 AndOpc = X86::AND32ri8;
849 else if (DstTy == LLT::scalar(64))
850 AndOpc = X86::AND64ri8;
854 unsigned DefReg = SrcReg;
855 if (DstTy != LLT::scalar(8)) {
858 TII.get(TargetOpcode::SUBREG_TO_REG), DefReg)
875 bool X86InstructionSelector::selectAnyext(
MachineInstr &I,
878 assert((I.
getOpcode() == TargetOpcode::G_ANYEXT) &&
"unexpected instruction");
890 "G_ANYEXT input/output on different banks\n");
893 "G_ANYEXT incorrect operand size");
902 return selectTurnIntoCOPY(I, MRI, SrcReg, SrcRC, DstReg, DstRC);
904 if (DstRB.
getID() != X86::GPRRegBankID)
907 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
908 !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
914 if (SrcRC == DstRC) {
920 TII.get(TargetOpcode::SUBREG_TO_REG))
924 .
addImm(getSubRegIndex(SrcRC));
933 assert((I.
getOpcode() == TargetOpcode::G_ICMP) &&
"unexpected instruction");
957 OpCmp = X86::CMP16rr;
960 OpCmp = X86::CMP32rr;
963 OpCmp = X86::CMP64rr;
982 bool X86InstructionSelector::selectFCmp(
MachineInstr &I,
985 assert((I.
getOpcode() == TargetOpcode::G_FCMP) &&
"unexpected instruction");
993 static const uint16_t SETFOpcTable[2][3] = {
994 {X86::SETEr, X86::SETNPr, X86::AND8rr},
995 {X86::SETNEr, X86::SETPr, X86::OR8rr}};
996 const uint16_t *SETFOpc =
nullptr;
1000 case CmpInst::FCMP_OEQ:
1001 SETFOpc = &SETFOpcTable[0][0];
1003 case CmpInst::FCMP_UNE:
1004 SETFOpc = &SETFOpcTable[1][0];
1015 OpCmp = X86::UCOMISSrr;
1018 OpCmp = X86::UCOMISDrr;
1023 RBI.constrainGenericRegister(
1025 *
getRegClass(LLT::scalar(8), *RBI.getRegBank(ResultReg, MRI,
TRI)), MRI);
1035 TII.get(SETFOpc[0]), FlagReg1);
1037 TII.get(SETFOpc[1]), FlagReg2);
1039 TII.get(SETFOpc[2]), ResultReg)
1074 bool X86InstructionSelector::selectUadde(
MachineInstr &I,
1077 assert((I.
getOpcode() == TargetOpcode::G_UADDE) &&
"unexpected instruction");
1087 if (DstTy != LLT::scalar(32))
1092 while (Def->
getOpcode() == TargetOpcode::G_TRUNC) {
1098 if (Def->
getOpcode() == TargetOpcode::G_UADDE) {
1102 .addReg(CarryInReg);
1104 if (!RBI.constrainGenericRegister(CarryInReg, X86::GR32RegClass, MRI))
1107 Opcode = X86::ADC32rr;
1113 Opcode = X86::ADD32rr;
1123 .addReg(X86::EFLAGS);
1126 !RBI.constrainGenericRegister(CarryOutReg, X86::GR32RegClass, MRI))
1133 bool X86InstructionSelector::selectExtract(
MachineInstr &I,
1137 "unexpected instruction");
1155 if (!emitExtractSubreg(DstReg, SrcReg, I, MRI, MF))
1162 bool HasAVX = STI.hasAVX();
1163 bool HasAVX512 = STI.hasAVX512();
1164 bool HasVLX = STI.hasVLX();
1168 I.
setDesc(
TII.get(X86::VEXTRACTF32x4Z256rr));
1190 bool X86InstructionSelector::emitExtractSubreg(
unsigned DstReg,
unsigned SrcReg,
1196 unsigned SubIdx = X86::NoSubRegister;
1202 "Incorrect Src/Dst register size");
1205 SubIdx = X86::sub_xmm;
1207 SubIdx = X86::sub_ymm;
1214 SrcRC =
TRI.getSubClassWithSubReg(SrcRC, SubIdx);
1216 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
1217 !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
1223 .addReg(SrcReg, 0, SubIdx);
1228 bool X86InstructionSelector::emitInsertSubreg(
unsigned DstReg,
unsigned SrcReg,
1234 unsigned SubIdx = X86::NoSubRegister;
1241 "Incorrect Src/Dst register size");
1244 SubIdx = X86::sub_xmm;
1246 SubIdx = X86::sub_ymm;
1253 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
1254 !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
1266 bool X86InstructionSelector::selectInsert(
MachineInstr &I,
1269 assert((I.
getOpcode() == TargetOpcode::G_INSERT) &&
"unexpected instruction");
1277 const LLT InsertRegTy = MRI.
getType(InsertReg);
1288 if (!emitInsertSubreg(DstReg, InsertReg, I, MRI, MF))
1295 bool HasAVX = STI.hasAVX();
1296 bool HasAVX512 = STI.hasAVX512();
1297 bool HasVLX = STI.hasVLX();
1328 "unexpected instruction");
1335 for (
unsigned Idx = 0; Idx < NumDefs; ++Idx) {
1342 if (!select(ExtrInst, CoverageInfo))
1354 I.
getOpcode() == TargetOpcode::G_CONCAT_VECTORS) &&
1355 "unexpected instruction");
1374 unsigned Tmp = MRI.createGenericVirtualRegister(DstTy);
1375 MRI.setRegBank(Tmp, RegBank);
1378 TII.get(TargetOpcode::G_INSERT), Tmp)
1381 .addImm((Idx - 1) * SrcSize);
1385 if (!select(InsertInst, CoverageInfo))
1390 TII.get(TargetOpcode::COPY), DstReg)
1393 if (!select(CopyInst, CoverageInfo))
1400 bool X86InstructionSelector::selectCondBranch(
MachineInstr &I,
1403 assert((I.
getOpcode() == TargetOpcode::G_BRCOND) &&
"unexpected instruction");
1421 bool X86InstructionSelector::materializeFP(
MachineInstr &I,
1425 "unexpected instruction");
1438 unsigned Opc = getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Align);
1444 unsigned char OpFlag = STI.classifyLocalReference(
nullptr);
1452 .addConstantPoolIndex(CPI, 0, OpFlag);
1455 MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
1468 unsigned PICBase = 0;
1487 bool X86InstructionSelector::selectImplicitDefOrPHI(
1490 I.
getOpcode() == TargetOpcode::G_PHI) &&
1491 "unexpected instruction");
1499 if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
1506 if (I.
getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1518 bool X86InstructionSelector::selectShift(
MachineInstr &I,
1523 I.
getOpcode() == TargetOpcode::G_ASHR ||
1524 I.
getOpcode() == TargetOpcode::G_LSHR) &&
1525 "unexpected instruction");
1531 const static struct ShiftEntry {
1532 unsigned SizeInBits;
1538 {8, X86::CL, X86::SHR8rCL, X86::SAR8rCL, X86::SHL8rCL},
1539 {16, X86::CX, X86::SHR16rCL, X86::SAR16rCL, X86::SHL16rCL},
1540 {32,
X86::ECX, X86::SHR32rCL, X86::SAR32rCL, X86::SHL32rCL},
1541 {64, X86::RCX, X86::SHR64rCL, X86::SAR64rCL, X86::SHL64rCL}
1544 if (DstRB.
getID() != X86::GPRRegBankID)
1551 if (ShiftEntryIt ==
std::end(OpTable))
1554 unsigned CReg = ShiftEntryIt->CReg;
1555 unsigned Opcode = 0;
1557 case TargetOpcode::G_SHL:
1558 Opcode = ShiftEntryIt->OpSHL;
1560 case TargetOpcode::G_ASHR:
1561 Opcode = ShiftEntryIt->OpASHR;
1563 case TargetOpcode::G_LSHR:
1564 Opcode = ShiftEntryIt->OpLSHR;
1579 if (CReg != X86::CL)
1593 bool X86InstructionSelector::selectDivRem(
MachineInstr &I,
1598 I.
getOpcode() == TargetOpcode::G_SREM ||
1599 I.
getOpcode() == TargetOpcode::G_UDIV ||
1600 I.
getOpcode() == TargetOpcode::G_UREM) &&
1601 "unexpected instruction");
1609 "Arguments and return value types must match");
1612 if (RegRB.getID() != X86::GPRRegBankID)
1615 const static unsigned NumTypes = 4;
1616 const static unsigned NumOps = 4;
1617 const static bool S =
true;
1618 const static bool U =
false;
1619 const static unsigned Copy = TargetOpcode::COPY;
1628 const static struct DivRemEntry {
1630 unsigned SizeInBits;
1634 struct DivRemResult {
1636 unsigned OpSignExtend;
1640 unsigned DivRemResultReg;
1642 } ResultTable[NumOps];
1643 } OpTable[NumTypes] = {
1648 {X86::IDIV8r, 0, X86::MOVSX16rr8,
X86::AL, S},
1649 {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S},
1650 {X86::DIV8r, 0, X86::MOVZX16rr8,
X86::AL, U},
1651 {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U},
1657 {X86::IDIV16r, X86::CWD, Copy, X86::AX, S},
1658 {X86::IDIV16r, X86::CWD, Copy, X86::DX, S},
1659 {X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U},
1660 {X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U},
1666 {X86::IDIV32r, X86::CDQ, Copy,
X86::EAX, S},
1667 {X86::IDIV32r, X86::CDQ, Copy,
X86::EDX, S},
1668 {X86::DIV32r, X86::MOV32r0, Copy,
X86::EAX, U},
1669 {X86::DIV32r, X86::MOV32r0, Copy,
X86::EDX, U},
1675 {X86::IDIV64r, X86::CQO, Copy, X86::RAX, S},
1676 {X86::IDIV64r, X86::CQO, Copy, X86::RDX, S},
1677 {X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U},
1678 {X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U},
1683 [RegTy](
const DivRemEntry &El) {
1686 if (OpEntryIt ==
std::end(OpTable))
1693 case TargetOpcode::G_SDIV:
1696 case TargetOpcode::G_SREM:
1699 case TargetOpcode::G_UDIV:
1702 case TargetOpcode::G_UREM:
1707 const DivRemEntry &TypeEntry = *OpEntryIt;
1708 const DivRemEntry::DivRemResult &OpEntry = TypeEntry.ResultTable[OpIndex];
1711 if (!RBI.constrainGenericRegister(Op1Reg, *RegRC, MRI) ||
1712 !RBI.constrainGenericRegister(Op2Reg, *RegRC, MRI) ||
1713 !RBI.constrainGenericRegister(DstReg, *RegRC, MRI)) {
1724 if (OpEntry.OpSignExtend) {
1725 if (OpEntry.IsOpSigned)
1727 TII.get(OpEntry.OpSignExtend));
1738 TypeEntry.HighInReg)
1739 .addReg(Zero32, 0, X86::sub_16bit);
1742 TypeEntry.HighInReg)
1746 TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
1764 if ((I.
getOpcode() == Instruction::SRem ||
1766 OpEntry.DivRemResultReg == X86::AH && STI.is64Bit()) {
1775 .addReg(SourceSuperReg)
1780 TII.get(TargetOpcode::SUBREG_TO_REG))
1788 .addReg(OpEntry.DivRemResultReg);
1794 bool X86InstructionSelector::selectIntrinsicWSideEffects(
1798 "unexpected instruction");
1813 return new X86InstructionSelector(TM, Subtarget, RBI);
const_iterator end(StringRef path)
Get end iterator over path.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
This class is the base class for the comparison instructions.
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
MachineBasicBlock * getMBB() const
Atomic ordering constants.
static bool selectMergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
This class represents lattice values for constants.
static const MachineInstrBuilder & addConstantPoolReference(const MachineInstrBuilder &MIB, unsigned CPI, unsigned GlobalBaseReg, unsigned char OpFlags)
addConstantPoolReference - This function is used to add a reference to the base of a constant value s...
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
union llvm::X86AddressMode::@497 Base
unsigned getReg() const
getReg - Returns the register number.
static uint64_t selectImpl(uint64_t CandidateMask, uint64_t &NextInSequenceMask)
LLT getType(unsigned Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register...
unsigned const TargetRegisterInfo * TRI
An instruction for reading from memory.
void setRegBank(unsigned Reg, const RegisterBank &RegBank)
Set the register bank to RegBank for Reg.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
static const TargetRegisterClass * getRegClassFromGRPhysReg(unsigned Reg)
return AArch64::GPR64RegClass contains(Reg)
A description of a memory reference used in the backend.
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
const HexagonInstrInfo * TII
const ConstantFP * getFPImm() const
unsigned getNumOperands() const
Returns the total number of operands.
This class provides the information for the target register banks.
const MachineInstrBuilder & addUse(unsigned RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
static bool isGlobalStubReference(unsigned char TargetFlag)
isGlobalStubReference - Return true if the specified TargetFlag operand is a reference to a stub for ...
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
static StringRef getName(Value *V)
static int getRegClass(RegisterKind Is, unsigned RegWidth)
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
enum llvm::X86AddressMode::@496 BaseType
MachineInstr * getVRegDef(unsigned Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
static bool isGlobalRelativeToPICBase(unsigned char TargetFlag)
isGlobalRelativeToPICBase - Return true if the specified global value reference is relative to a 32-b...
static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC, const TargetRegisterClass *SrcRC)
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
void ChangeToImmediate(int64_t ImmVal)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value...
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
std::pair< CondCode, bool > getX86ConditionCode(CmpInst::Predicate Predicate)
Return a pair of condition code for the given predicate and whether the instruction operands should b...
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
unsigned const MachineRegisterInfo * MRI
#define GET_GLOBALISEL_PREDICATES_INIT
unsigned getPointerSize(unsigned AS=0) const
Layout pointer size FIXME: The defaults need to be removed once all of the backends/clients are updat...
const GlobalValue * getGlobal() const
ConstantFP - Floating Point Values [float, double].
bool isCImm() const
isCImm - Test if this is a MO_CImmediate operand.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
int64_t addOffset(int64_t LHS, int64_t RHS)
void substPhysReg(unsigned Reg, const TargetRegisterInfo &)
substPhysReg - Substitute the current register with the physical register Reg, taking any existing Su...
void setImm(int64_t immVal)
TRAP - Trapping instruction.
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
void print(raw_ostream &OS, bool IsStandalone=true, bool SkipOpers=false, bool SkipDebugLoc=false, bool AddNewLine=true, const TargetInstrInfo *TII=nullptr) const
Print this MI to OS.
auto find_if(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range))
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly...
static void X86SelectAddress(const MachineInstr &I, const MachineRegisterInfo &MRI, X86AddressMode &AM)
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
unsigned getSETFromCond(CondCode CC, bool HasMemoryOperand=false)
Return a set opcode for the given condition and whether it has a memory operand.
bool isImplicitDef() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
This file declares the targeting of the RegisterBankInfo class for X86.
unsigned createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
void setDesc(const MCInstrDesc &tid)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one...
constexpr bool isInt< 32 >(int64_t x)
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
MachineOperand class - Representation of each machine instruction operand.
Intrinsic::ID getIntrinsicID() const
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
unsigned getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
InstructionSelector * createX86InstructionSelector(const X86TargetMachine &TM, X86Subtarget &, X86RegisterBankInfo &)
This class implements the register bank concept.
bool isTarget64BitILP32() const
Is this x86_64 with the ILP32 programming model (x32 ABI)?
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Optional< int64_t > getConstantVRegVal(unsigned VReg, const MachineRegisterInfo &MRI)
const MachineBasicBlock * getParent() const
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
Provides the logic to select generic machine instructions.
Representation of each machine instruction.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MO_GOTOFF - On a symbol operand this indicates that the immediate is the offset to the location of th...
static const MachineInstrBuilder & addDirectMem(const MachineInstrBuilder &MIB, unsigned Reg)
addDirectMem - This function is used to add a direct memory reference to the current instruction – t...
static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI)
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
void setReg(unsigned Reg)
Change the register this operand corresponds to.
void setSubReg(unsigned subReg)
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
#define GET_GLOBALISEL_TEMPORARIES_INIT
const TargetRegisterClass * getRegClassOrNull(unsigned Reg) const
Return the register class of Reg, or null if Reg has not been assigned a register class yet...
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel...
unsigned getSizeInBits(unsigned Reg, const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI) const
Get the size in bits of Reg.
MO_PIC_BASE_OFFSET - On a symbol operand this indicates that the immediate should get the value of th...
void RemoveOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with...
X86AddressMode - This struct holds a generalized full x86 address mode.
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
const MachineOperand & getOperand(unsigned i) const
const ConstantInt * getCImm() const
static const MachineInstrBuilder & addFullAddress(const MachineInstrBuilder &MIB, const X86AddressMode &AM)
unsigned getConstantPoolIndex(const Constant *C, unsigned Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one...
unsigned createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
unsigned getID() const
Get the identifier of this register bank.
unsigned getPredicate() const