#define DEBUG_TYPE "hexagon-pei"

using namespace llvm;
cl::desc("Enable stack frame shrink wrapping"));
cl::desc("Max count of stack frame shrink-wraps"));
bool HexagonCallFrameInformation::runOnMachineFunction(MachineFunction &MF) {
HFI.insertCFIInstructions(MF);
"Hexagon call frame information", false, false)
return new HexagonCallFrameInformation();
if (Reg < Hexagon::D0 || Reg > Hexagon::D15)
if (*SubRegs > RegNo)
if (!RegNo || *SubRegs < RegNo)
static_assert(Hexagon::R1 > 0,
              "Assume physical registers are encoded as positive integers");
for (unsigned I = 1, E = CSI.size(); I < E; ++I) {
for (auto &I : MBB) {
case Hexagon::PS_alloca:
case Hexagon::PS_aligna:
unsigned R = MO.getReg();
if (MO.isRegMask()) {
const uint32_t *BM = MO.getRegMask();
if (!(BM[R/32] & (1u << (R%32))))
unsigned RetOpc = I->getOpcode();
return RetOpc == Hexagon::PS_tailcall_i || RetOpc == Hexagon::PS_tailcall_r;
case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT:
case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC:
case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT:
case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC:
case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4:
case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC:
static unsigned ShrinkCounter = 0;
for (RPOTType::rpo_iterator I = RPOT.begin(), E = RPOT.end(); I != E; ++I)
  RPO[(*I)->getNumber()] = RPON++;
unsigned BN = RPO[I.getNumber()];
for (auto SI = I.succ_begin(), SE = I.succ_end(); SI != SE; ++SI) {
if (RPO[(*SI)->getNumber()] <= BN)
for (const MCPhysReg *P = HRI.getCalleeSavedRegs(&MF); *P; ++P)
dbgs() << "Blocks needing SF: {";
for (auto &B : SFBlocks)
if (SFBlocks.empty())
for (unsigned i = 1, n = SFBlocks.size(); i < n; ++i) {
for (unsigned i = 1, n = SFBlocks.size(); i < n; ++i) {
dbgs() << "Computed dom block: ";
dbgs() << ", computed pdom block: ";
LLVM_DEBUG(dbgs() << "PDom block does not post-dominate dom block\n");
findShrunkPrologEpilog(MF, PrologB, EpilogB);
bool PrologueStubs = false;
insertCSRSpillsInBlock(*PrologB, CSI, HRI, PrologueStubs);
insertPrologueInBlock(*PrologB, PrologueStubs);
updateEntryPaths(MF, *PrologB);
insertCSRRestoresInBlock(*EpilogB, CSI, HRI);
insertEpilogueInBlock(*EpilogB);
if (B.isReturnBlock())
  insertCSRRestoresInBlock(B, CSI, HRI);
if (B.isReturnBlock())
  insertEpilogueInBlock(B);
BitVector DoneT(MaxBN+1), DoneF(MaxBN+1), Path(MaxBN+1);
updateExitPaths(*EpilogB, *EpilogB, DoneT, DoneF, Path);
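// Shrink-wrapping summary: SFBlocks holds the blocks that actually need a
// stack frame; the prolog is placed at their nearest common dominator and the
// epilog at their nearest common post-dominator. If either block cannot be
// found, or the post-dominator does not post-dominate the dominator, the
// frame is emitted in the entry block and every return block instead.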
assert(!MFI.hasVarSizedObjects() &&
       !HST.getRegisterInfo()->needsStackRealignment(MF));
MFI.getStackSize() == 0;
bool PrologueStubs) const {
auto &HRI = *HST.getRegisterInfo();
FrameSize = MaxCFA + alignTo(FrameSize, MaxAlign);
bool AlignStack = (MaxAlign > getStackAlignment());
unsigned SP = HRI.getStackRegister();
if (MI.getOpcode() == Hexagon::PS_alloca)
for (auto MI : AdjustRegs) {
  assert((MI->getOpcode() == Hexagon::PS_alloca) && "Expected alloca");
  expandAlloca(MI, HII, SP, MaxCF);
  MI->eraseFromParent();
DebugLoc dl = MBB.findDebugLoc(InsertPt);
insertAllocframe(MBB, InsertPt, NumBytes);
BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_andir), SP)
    .addImm(-int64_t(MaxAlign));
BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::PS_call_stk))
    .addExternalSymbol("__runtime_stack_check");
} else if (NumBytes > 0) {
BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
auto &HRI = *HST.getRegisterInfo();
unsigned SP = HRI.getStackRegister();
BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
unsigned RetOpc = RetI ? RetI->getOpcode() : 0;
if (RetOpc == Hexagon::EH_RETURN_JMPR) {
BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::L2_deallocframe))
    .addDef(Hexagon::D15)
BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_add), SP)
if (RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4 ||
    RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC ||
    RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT ||
    RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC) {
while (It != MBB.end()) {
bool NeedsDeallocframe = true;
if (!MBB.empty() && InsertPt != MBB.begin()) {
unsigned COpc = PrevIt->getOpcode();
if (COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4 ||
    COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC ||
    COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT ||
    COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC ||
    COpc == Hexagon::PS_call_nr || COpc == Hexagon::PS_callr_nr)
  NeedsDeallocframe = false;
if (!NeedsDeallocframe)
BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::L2_deallocframe))
    .addDef(Hexagon::D15)
unsigned NewOpc = Hexagon::L4_return;
    .addDef(Hexagon::D15)
auto &HRI = *HST.getRegisterInfo();
const unsigned int ALLOCFRAME_MAX = 16384;
unsigned SP = HRI.getStackRegister();
if (NumBytes >= ALLOCFRAME_MAX) {
BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::S2_allocframe))
unsigned SP = HRI.getStackRegister();
BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::S2_allocframe))
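// S2_allocframe can only encode frame sizes below ALLOCFRAME_MAX; for larger
// frames an allocframe(#0) is emitted first and SP is then adjusted
// explicitly with A2_addi.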
for (unsigned i = 0; i < Worklist.size(); ++i) {
unsigned BN = Worklist[i];
Worklist.insert(SB->getNumber());
if (Path[BN] || DoneF[BN])
bool ReachedExit = false;
ReachedExit |= updateExitPaths(*SB, RestoreB, DoneT, DoneF, Path);
if (ReachedExit && &MBB != &RestoreB) {
if (I.getOpcode() == Hexagon::S2_allocframe)
  return std::next(It);
bool HasCall = false, HasAllocFrame = false;
while (++T != End && T->isBundled()) {
  if (T->getOpcode() == Hexagon::S2_allocframe)
    HasAllocFrame = true;
  else if (T->isCall())
return HasCall ? It : std::next(It);
insertCFIInstructionsAt(B, At.getValue());
auto &HRI = *HST.getRegisterInfo();
const MCInstrDesc &CFID = HII.get(TargetOpcode::CFI_INSTRUCTION);
bool HasFP = hasFP(MF);
unsigned DwFPReg = HRI.getDwarfRegNum(HRI.getFrameRegister(), true);
unsigned DwRAReg = HRI.getDwarfRegNum(HRI.getRARegister(), true);
static unsigned int RegsToMove[] = {
  Hexagon::R1,  Hexagon::R0,  Hexagon::R3,  Hexagon::R2,
  Hexagon::R17, Hexagon::R16, Hexagon::R19, Hexagon::R18,
  Hexagon::R21, Hexagon::R20, Hexagon::R23, Hexagon::R22,
  Hexagon::R25, Hexagon::R24, Hexagon::R27, Hexagon::R26,
  Hexagon::D0,  Hexagon::D1,  Hexagon::D8,  Hexagon::D9,
  Hexagon::D10, Hexagon::D11, Hexagon::D12, Hexagon::D13,
for (unsigned i = 0; RegsToMove[i] != Hexagon::NoRegister; ++i) {
unsigned Reg = RegsToMove[i];
return C.getReg() == Reg;
Offset = getFrameIndexReference(MF, F->getFrameIdx(), FrameReg);
if (Reg < Hexagon::D0 || Reg > Hexagon::D15) {
unsigned DwarfReg = HRI.getDwarfRegNum(Reg, true);
unsigned HiReg = HRI.getSubReg(Reg, Hexagon::isub_hi);
unsigned LoReg = HRI.getSubReg(Reg, Hexagon::isub_lo);
unsigned HiDwarfReg = HRI.getDwarfRegNum(HiReg, true);
unsigned LoDwarfReg = HRI.getDwarfRegNum(LoReg, true);
bool HasExtraAlign = HRI.needsStackRealignment(MF);
bool HasAlloca = MFI.hasVarSizedObjects();
if (HasAlloca || HasExtraAlign)
if (MFI.getStackSize() > 0) {
bool Stkchk = false) {
const char * V4SpillToMemoryFunctions[] = {
    "__save_r16_through_r17",
    "__save_r16_through_r19",
    "__save_r16_through_r21",
    "__save_r16_through_r23",
    "__save_r16_through_r25",
    "__save_r16_through_r27" };

const char * V4SpillToMemoryStkchkFunctions[] = {
    "__save_r16_through_r17_stkchk",
    "__save_r16_through_r19_stkchk",
    "__save_r16_through_r21_stkchk",
    "__save_r16_through_r23_stkchk",
    "__save_r16_through_r25_stkchk",
    "__save_r16_through_r27_stkchk" };

const char * V4SpillFromMemoryFunctions[] = {
    "__restore_r16_through_r17_and_deallocframe",
    "__restore_r16_through_r19_and_deallocframe",
    "__restore_r16_through_r21_and_deallocframe",
    "__restore_r16_through_r23_and_deallocframe",
    "__restore_r16_through_r25_and_deallocframe",
    "__restore_r16_through_r27_and_deallocframe" };

const char * V4SpillFromMemoryTailcallFunctions[] = {
    "__restore_r16_through_r17_and_deallocframe_before_tailcall",
    "__restore_r16_through_r19_and_deallocframe_before_tailcall",
    "__restore_r16_through_r21_and_deallocframe_before_tailcall",
    "__restore_r16_through_r23_and_deallocframe_before_tailcall",
    "__restore_r16_through_r25_and_deallocframe_before_tailcall",
    "__restore_r16_through_r27_and_deallocframe_before_tailcall" };

const char **SpillFunc = nullptr;
SpillFunc = Stkchk ? V4SpillToMemoryStkchkFunctions
                   : V4SpillToMemoryFunctions;
SpillFunc = V4SpillFromMemoryFunctions;
SpillFunc = V4SpillFromMemoryTailcallFunctions;
assert(SpillFunc && "Unknown spill kind");
return SpillFunc[0];
return SpillFunc[1];
return SpillFunc[2];
return SpillFunc[3];
return SpillFunc[4];
return SpillFunc[5];
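// The returned entry is selected by the highest callee-saved register being
// spilled: SpillFunc[0] corresponds to the r16-r17 routines, SpillFunc[1] to
// r16-r19, and so on up to SpillFunc[5] for r16-r27, matching the names in
// the tables above.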
int FI, unsigned &FrameReg) const {
int Offset = MFI.getObjectOffset(FI);
bool HasAlloca = MFI.hasVarSizedObjects();
bool HasExtraAlign = HRI.needsStackRealignment(MF);
unsigned FrameSize = MFI.getStackSize();
unsigned SP = HRI.getStackRegister();
unsigned FP = HRI.getFrameRegister();
unsigned AP = HMFI.getStackAlignBasePhysReg();
bool UseFP = false, UseAP = false;
if (NoOpt && !HasExtraAlign)
if (MFI.isFixedObjectIndex(FI) || MFI.isObjectPreAllocated(FI)) {
UseFP |= (HasAlloca || HasExtraAlign);
bool HasFP = hasFP(MF);
assert((HasFP || !UseFP) && "This function must have frame pointer");
if (Offset > 0 && !HasFP)
if (!UseFP && !UseAP)
  RealOffset = FrameSize + Offset;
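// Base-register selection: UseFP/UseAP decide whether the object is addressed
// off the frame pointer or off the stack-align base register AP; when neither
// is used the reference is SP-relative and the object offset is biased by the
// frame size (RealOffset = FrameSize + Offset).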
bool &PrologueStubs) const {
PrologueStubs = false;
if (useSpillFunction(MF, CSI)) {
PrologueStubs = true;
if (StkOvrFlowEnabled) {
SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4STK_EXT_PIC
                 : Hexagon::SAVE_REGISTERS_CALL_V4STK_EXT;
SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4STK_PIC
                 : Hexagon::SAVE_REGISTERS_CALL_V4STK;
SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4_EXT_PIC
                 : Hexagon::SAVE_REGISTERS_CALL_V4_EXT;
SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4_PIC
                 : Hexagon::SAVE_REGISTERS_CALL_V4;
BuildMI(MBB, MI, DL, HII.get(SpillOpc))
    .addExternalSymbol(SpillFun);
addCalleeSaveRegistersAsImpOperand(SaveRegsCall, CSI, false, true);
for (unsigned I = 0; I < CSI.size(); ++I)
for (unsigned i = 0, n = CSI.size(); i < n; ++i) {
unsigned Reg = CSI[i].getReg();
int FI = CSI[i].getFrameIdx();
HII.storeRegToStackSlot(MBB, MI, Reg, IsKill, FI, RC, &HRI);
if (useRestoreFunction(MF, CSI)) {
RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC
               : Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT;
RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC
               : Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4;
DeallocCall = BuildMI(MBB, MI, DL, HII.get(RetOpc))
    .addExternalSymbol(RestoreFn);
assert(It->isReturn() && std::next(It) == MBB.end());
RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC
               : Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT;
RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC
               : Hexagon::RESTORE_DEALLOC_RET_JMP_V4;
DeallocCall = BuildMI(MBB, It, DL, HII.get(RetOpc))
    .addExternalSymbol(RestoreFn);
addCalleeSaveRegistersAsImpOperand(DeallocCall, CSI, true, false);
for (unsigned i = 0; i < CSI.size(); ++i) {
unsigned Reg = CSI[i].getReg();
int FI = CSI[i].getFrameIdx();
HII.loadRegFromStackSlot(MBB, MI, Reg, FI, RC, &HRI);
assert((Opc == Hexagon::ADJCALLSTACKDOWN || Opc == Hexagon::ADJCALLSTACKUP) &&
       "Cannot handle this call frame pseudo instruction");
return MBB.erase(I);
if (!HasAlloca || !NeedsAlign)
assert(A <= 8 && "Unexpected local frame alignment");
AP = AI->getOperand(0).getReg();
auto IsUsed = [&HRI,&MRI] (unsigned Reg) -> bool {
BitVector SRegs(Hexagon::NUM_TARGET_REGS);
for (unsigned i = 0, n = CSI.size(); i < n; ++i) {
unsigned R = CSI[i].getReg();
for (int x = Reserved.find_first(); x >= 0; x = Reserved.find_next(x)) {
BitVector TmpSup(Hexagon::NUM_TARGET_REGS);
for (int x = TmpSup.find_first(); x >= 0; x = TmpSup.find_next(x)) {
const SpillSlot *FixedSlots = getCalleeSavedSpillSlots(NumFixed);
for (const SpillSlot *S = FixedSlots; S != FixedSlots+NumFixed; ++S) {
int FI = MFI.CreateFixedSpillStackObject(TRI->getSpillSize(*RC), S->Offset);
MinOffset = std::min(MinOffset, S->Offset);
SRegs[S->Reg] = false;
int Off = MinOffset - Size;
int FI = MFI.CreateFixedSpillStackObject(Size, Off);
MinOffset = std::min(MinOffset, Off);
dbgs() << "CS information: {";
for (unsigned i = 0, n = CSI.size(); i < n; ++i) {
int FI = CSI[i].getFrameIdx();
int Off = MFI.getObjectOffset(FI);
bool MissedReg = false;
if (!Hexagon::ModRegsRegClass.contains(DstR) ||
    !Hexagon::ModRegsRegClass.contains(SrcR))
BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), DstR)
unsigned TfrOpc = (Opc == Hexagon::STriw_pred) ? Hexagon::C2_tfrpr
                                               : Hexagon::A2_tfrcrr;
BuildMI(B, It, DL, HII.get(TfrOpc), TmpR)
BuildMI(B, It, DL, HII.get(Hexagon::S2_storeri_io))
BuildMI(B, It, DL, HII.get(Hexagon::L2_loadri_io), TmpR)
unsigned TfrOpc = (Opc == Hexagon::LDriw_pred) ? Hexagon::C2_tfrrp
                                               : Hexagon::A2_tfrrcr;
BuildMI(B, It, DL, HII.get(TfrOpc), DstR)
auto *RC = &Hexagon::HvxVRRegClass;
BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0)
    .addImm(0x01010101);
BuildMI(B, It, DL, HII.get(Hexagon::V6_vandqrt), TmpR1)
expandStoreVec(B, std::prev(It), MRI, HII, NewRegs);
auto *RC = &Hexagon::HvxVRRegClass;
BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0)
    .addImm(0x01010101);
expandLoadVec(B, std::prev(It), MRI, HII, NewRegs);
BuildMI(B, It, DL, HII.get(Hexagon::V6_vandvrt), DstR)
for (auto R = B.begin(); R != It; ++R) {
unsigned SrcLo = HRI.getSubReg(SrcR, Hexagon::vsub_lo);
unsigned SrcHi = HRI.getSubReg(SrcR, Hexagon::vsub_hi);
unsigned Size = HRI.getSpillSize(Hexagon::HvxVRRegClass);
unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass);
unsigned HasAlign = MFI.getObjectAlignment(FI);
StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
                                 : Hexagon::V6_vS32Ub_ai;
BuildMI(B, It, DL, HII.get(StoreOpc))
StoreOpc = NeedAlign <= MinAlign(HasAlign, Size) ? Hexagon::V6_vS32b_ai
                                                 : Hexagon::V6_vS32Ub_ai;
BuildMI(B, It, DL, HII.get(StoreOpc))
unsigned DstHi = HRI.getSubReg(DstR, Hexagon::vsub_hi);
unsigned DstLo = HRI.getSubReg(DstR, Hexagon::vsub_lo);
unsigned Size = HRI.getSpillSize(Hexagon::HvxVRRegClass);
unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass);
unsigned HasAlign = MFI.getObjectAlignment(FI);
LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai
                                : Hexagon::V6_vL32Ub_ai;
BuildMI(B, It, DL, HII.get(LoadOpc), DstLo)
LoadOpc = NeedAlign <= MinAlign(HasAlign, Size) ? Hexagon::V6_vL32b_ai
                                                : Hexagon::V6_vL32Ub_ai;
BuildMI(B, It, DL, HII.get(LoadOpc), DstHi)
unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass);
unsigned HasAlign = MFI.getObjectAlignment(FI);
unsigned StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
                                          : Hexagon::V6_vS32Ub_ai;
BuildMI(B, It, DL, HII.get(StoreOpc))
unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass);
unsigned HasAlign = MFI.getObjectAlignment(FI);
unsigned LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai
                                         : Hexagon::V6_vL32Ub_ai;
BuildMI(B, It, DL, HII.get(LoadOpc), DstR)
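// Aligned HVX stores/loads (V6_vS32b_ai / V6_vL32b_ai) are used only when the
// spill slot is at least as aligned as the natural spill alignment of the HVX
// register class; otherwise the unaligned forms (V6_vS32Ub_ai / V6_vL32Ub_ai)
// are emitted. For the high half of a vector pair the effective slot
// alignment is MinAlign(HasAlign, Size).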
bool Changed = false;
for (auto &B : MF) {
NextI = std::next(I);
case TargetOpcode::COPY:
  Changed |= expandCopy(B, I, MRI, HII, NewRegs);
case Hexagon::STriw_pred:
case Hexagon::STriw_ctr:
  Changed |= expandStoreInt(B, I, MRI, HII, NewRegs);
case Hexagon::LDriw_pred:
case Hexagon::LDriw_ctr:
  Changed |= expandLoadInt(B, I, MRI, HII, NewRegs);
case Hexagon::PS_vstorerq_ai:
  Changed |= expandStoreVecPred(B, I, MRI, HII, NewRegs);
case Hexagon::PS_vloadrq_ai:
  Changed |= expandLoadVecPred(B, I, MRI, HII, NewRegs);
case Hexagon::PS_vloadrw_ai:
case Hexagon::PS_vloadrwu_ai:
  Changed |= expandLoadVec2(B, I, MRI, HII, NewRegs);
case Hexagon::PS_vstorerw_ai:
case Hexagon::PS_vstorerwu_ai:
  Changed |= expandStoreVec2(B, I, MRI, HII, NewRegs);
SavedRegs.resize(HRI.getNumRegs());
expandSpillMacros(MF, NewRegs);
optimizeSpillSlots(MF, NewRegs);
if (!NewRegs.empty() || mayOverflowFrameOffset(MF)) {
SpillRCs.insert(&Hexagon::IntRegsRegClass);
for (unsigned VR : NewRegs)
for (auto *RC : SpillRCs) {
unsigned S = HRI.getSpillSize(*RC), A = HRI.getSpillAlignment(*RC);
for (unsigned i = 0; i < Num; i++) {
auto isDead = [&FIR,&DeadMap] (unsigned Reg) -> bool {
  auto F = DeadMap.find({Reg,0});
  if (F == DeadMap.end())
  for (auto &DR : F->second)
    if (DR.contains(FIR))
auto &HII = *HST.getInstrInfo();
auto &HRI = *HST.getRegisterInfo();
using BlockIndexMap =
    std::map<MachineBasicBlock *, HexagonBlockRanges::InstrIndexMap>;
using BlockRangeMap =
    std::map<MachineBasicBlock *, HexagonBlockRanges::RangeList>;
SlotInfo() = default;
BlockIndexMap BlockIndexes;
std::map<int,SlotInfo> FIRangeMap;
if (HaveRC == nullptr || HaveRC == NewRC)
if (NewRC->hasSubClassEq(HaveRC))
for (auto &B : MF) {
std::map<int,IndexType> LastStore, LastLoad;
auto P = BlockIndexes.insert(
auto &IndexMap = P.first->second;
<< IndexMap << '\n');
for (auto &In : B) {
bool Load = HII.isLoadFromStackSlot(In, LFI) && !HII.isPredicated(In);
bool Store = HII.isStoreToStackSlot(In, SFI) && !HII.isPredicated(In);
if (Load && Store) {
if (Load || Store) {
int TFI = Load ? LFI : SFI;
unsigned AM = HII.getAddrMode(In);
SlotInfo &SI = FIRangeMap[TFI];
unsigned OpNum = Load ? 0 : 2;
auto *RC = HII.getRegClass(In.getDesc(), OpNum, &HRI, MF);
RC = getCommonRC(SI.RC, RC);
unsigned S = HII.getMemAccessSize(In);
if (SI.Size != 0 && SI.Size != S)
for (auto *Mo : In.memoperands()) {
if (!Mo->isVolatile())
for (unsigned i = 0, n = In.getNumOperands(); i < n; ++i) {
if (i+1 >= n || !In.getOperand(i+1).isImm() ||
    In.getOperand(i+1).getImm() != 0)
if (BadFIs.count(FI))
LastStore[FI] = IndexType::Entry;
LastLoad[FI] = Index;
RL.add(LastStore[FI], LastLoad[FI], false, false);
RL.add(IndexType::Entry, LastLoad[FI], false, false);
LastStore[FI] = Index;
for (auto &I : LastLoad) {
IndexType LL = I.second;
auto &RL = FIRangeMap[I.first].Map[&B];
IndexType &LS = LastStore[I.first];
RL.add(LS, LL, false, false);
RL.add(IndexType::Entry, LL, false, false);
for (auto &I : LastStore) {
IndexType LS = I.second;
auto &RL = FIRangeMap[I.first].Map[&B];
for (auto &P : FIRangeMap) {
dbgs() << "fi#" << P.first;
if (BadFIs.count(P.first))
if (P.second.RC != nullptr)
  dbgs() << HRI.getRegClassName(P.second.RC) << '\n';
dbgs() << "<null>\n";
for (auto &R : P.second.Map)
std::map<MachineBasicBlock*,std::vector<int>> BlockFIMap;
for (auto &P : FIRangeMap) {
if (BadFIs.count(P.first))
for (auto &B : MF) {
auto F = P.second.Map.find(&B);
if (F == P.second.Map.end() || F->second.empty())
if (IR.start() == IndexType::Entry)
  LoxFIs.insert(P.first);
BlockFIMap[&B].push_back(P.first);
dbgs() << "Block-to-FI map (* -- live-on-exit):\n";
for (auto &P : BlockFIMap) {
auto &FIs = P.second;
for (auto I : FIs) {
dbgs() << " fi#" << I;
if (LoxFIs.count(I))
for (auto &B : MF) {
auto F = BlockIndexes.find(&B);
assert(F != BlockIndexes.end());
for (auto FI : BlockFIMap[&B]) {
if (BadFIs.count(FI))
for (auto &Range : RL) {
if (!IndexType::isInstr(Range.start()) ||
    !IndexType::isInstr(Range.end()))
SrcOp.getSubReg() };
auto *RC = HII.getRegClass(SI.getDesc(), 2, &HRI, MF);
unsigned FoundR = this->findPhysReg(MF, Range, IM, DM, RC);
if (SrcRR.Reg != FoundR || SrcRR.Sub != 0) {
CopyIn = BuildMI(B, StartIt, DL, HII.get(TargetOpcode::COPY), FoundR)
if (LoxFIs.count(FI) && (&Range == &RL.back())) {
if (unsigned SR = SrcOp.getSubReg())
  SrcOp.setReg(HRI.getSubReg(FoundR, SR));
SrcOp.setReg(FoundR);
SrcOp.setIsKill(false);
IM.replaceInstr(&SI, CopyIn);
for (auto It = StartIt; It != EndIt; It = NextIt) {
NextIt = std::next(It);
if (!HII.isLoadFromStackSlot(MI, TFI) || TFI != FI)
if (DstR != FoundR) {
unsigned MemSize = HII.getMemAccessSize(MI);
unsigned CopyOpc = TargetOpcode::COPY;
if (HII.isSignExtendingLoad(MI))
  CopyOpc = (MemSize == 1) ? Hexagon::A2_sxtb : Hexagon::A2_sxth;
else if (HII.isZeroExtendingLoad(MI))
  CopyOpc = (MemSize == 1) ? Hexagon::A2_zxtb : Hexagon::A2_zxth;
CopyOut = BuildMI(B, It, DL, HII.get(CopyOpc), DstR)
IM.replaceInstr(&MI, CopyOut);
DM[RR].subtract(Range);
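// Spill-slot optimization: when a physical register FoundR is free over the
// whole store-to-load range of a slot, the spilled value is kept in FoundR
// instead; the store becomes a COPY into FoundR and each reload becomes a
// COPY (or a sign/zero extension for sub-word extending loads) out of it.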
void HexagonFrameLowering::expandAlloca(MachineInstr *AI,
BuildMI(MB, AI, DL, HII.get(Hexagon::A2_sub), Rd)
BuildMI(MB, AI, DL, HII.get(Hexagon::A2_sub), SP)
BuildMI(MB, AI, DL, HII.get(Hexagon::A2_andir), Rd)
BuildMI(MB, AI, DL, HII.get(Hexagon::A2_andir), SP)
BuildMI(MB, AI, DL, HII.get(TargetOpcode::COPY), SP)
BuildMI(MB, AI, DL, HII.get(Hexagon::A2_addi), Rd)
if (MaxA <= getStackAlignment())
if (I.getOpcode() == Hexagon::PS_aligna)
void HexagonFrameLowering::addCalleeSaveRegistersAsImpOperand(MachineInstr *MI,
      const CSIVect &CSI, bool IsDef, bool IsKill) const {
const CSIVect &CSI) const {
BitVector Regs(Hexagon::NUM_TARGET_REGS);
for (unsigned i = 0, n = CSI.size(); i < n; ++i) {
unsigned R = CSI[i].getReg();
if (!Hexagon::DoubleRegsRegClass.contains(R))
if (F != Hexagon::D8)
if (N >= 0 && N != F+1)
bool HexagonFrameLowering::useSpillFunction(const MachineFunction &MF,
      const CSIVect &CSI) const {
if (shouldInlineCSR(MF, CSI))
unsigned NumCSI = CSI.size();
return Threshold < NumCSI;
bool HexagonFrameLowering::useRestoreFunction(const MachineFunction &MF,
      const CSIVect &CSI) const {
if (shouldInlineCSR(MF, CSI))
unsigned NumCSI = CSI.size();
return Threshold < NumCSI;
bool HexagonFrameLowering::mayOverflowFrameOffset(MachineFunction &MF) const {
if (HST.useHVXOps() && StackSize > 256)
bool HasImmStack = false;
unsigned MinLS = ~0u;
case Hexagon::S4_storeirit_io:
case Hexagon::S4_storeirif_io:
case Hexagon::S4_storeiri_io:
case Hexagon::S4_storeirht_io:
case Hexagon::S4_storeirhf_io:
case Hexagon::S4_storeirh_io:
case Hexagon::S4_storeirbt_io:
case Hexagon::S4_storeirbf_io:
case Hexagon::S4_storeirb_io:
MinLS = std::min(MinLS, LS);
return !isUInt<6>(StackSize >> MinLS);
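// The S4_storeir*_io forms encode an unsigned 6-bit offset scaled by the
// access size, so frame offsets may overflow whenever StackSize >> MinLS no
// longer fits in 6 bits.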