#define DEBUG_TYPE "packets"

static cl::opt<bool> DisablePacketizer("disable-packetizer", cl::Hidden,
  cl::ZeroOrMore, cl::init(false),
  cl::desc("Disable Hexagon packetizer pass"));

cl::opt<bool> Slot1Store("slot1-store-slot0-load", cl::Hidden,
  cl::ZeroOrMore, cl::init(true),
  cl::desc("Allow slot1 store and slot0 load"));

static cl::opt<bool> PacketizeVolatiles("hexagon-packetize-volatiles",
  cl::ZeroOrMore, cl::Hidden, cl::init(true),
  cl::desc("Allow non-solo packetization of volatile memory references"));

static cl::opt<bool> DisableVecDblNVStores("disable-vecdbl-nv-stores",
  cl::init(false), cl::Hidden, cl::ZeroOrMore,
  cl::desc("Disable vector double new-value-stores"));
HexagonPacketizer(bool Min = false)

StringRef getPassName() const override { return "Hexagon Packetizer"; }

INITIALIZE_PASS_BEGIN(HexagonPacketizer, "hexagon-packetizer",
                      "Hexagon Packetizer", false, false)
addMutation(llvm::make_unique<HexagonSubtarget::UsrOverflowMutation>());
addMutation(llvm::make_unique<HexagonSubtarget::HVXMemLatencyMutation>());
addMutation(llvm::make_unique<HexagonSubtarget::BankConflictMutation>());
for (auto &MO : FirstI.operands()) {
  if (!MO.isReg() || !MO.isDef())
    continue;
  unsigned R = MO.getReg();

InsertPt = std::next(BundleIt).getInstrIterator();

for (++I; I != E && I->isBundledWithPred(); ++I)

BundleIt->eraseFromParent();
HRI = HST.getRegisterInfo();
auto &MLI = getAnalysis<MachineLoopInfo>();
auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
auto *MBPI = &getAnalysis<MachineBranchProbabilityInfo>();

HII->genAllInsnTimingClasses(MF);

assert(Packetizer.getResourceTracker() && "Empty DFA table!");
auto MI = MB.begin();
auto NextI = std::next(MI);

for (auto &MB : MF) {
  auto Begin = MB.begin(), End = MB.end();
  while (Begin != End) {
    while (RB != End && HII->isSchedulingBoundary(*RB, &MB, MF))
      ++RB;
    while (RE != End && !HII->isSchedulingBoundary(*RE, &MB, MF))
      ++RE;
    Packetizer.PacketizeMIs(&MB, RB, RE);

Packetizer.unpacketizeSoloInstrs(MF);
if (!tryAllocateResourcesForConstExt(true))

return tryAllocateResourcesForConstExt(false);

bool Avail = ResourceTracker->canReserveResources(*ExtMI);
if (Reserve && Avail)
  ResourceTracker->reserveResources(*ExtMI);
if (DepReg == HRI->getRARegister())

if (HII->isDeallocRet(MI))
  if (DepReg == HRI->getFrameRegister() || DepReg == HRI->getStackRegister())

if (MO.isReg() && MO.getReg() == DepReg && !MO.isImplicit())

return MI.getOpcode() == Hexagon::J2_jump;

case Hexagon::Y2_barrier:
if (NewRC == &Hexagon::PredRegsRegClass) {
  if (HII->isHVXVec(MI) && MI.mayStore())
    return false;
  return HII->isPredicated(MI) && HII->getDotNewPredOp(MI, nullptr) > 0;
}
return HII->mayBeNewStore(MI);
int CurOpcode = HII->getDotCurOp(MI);
MI.setDesc(HII->get(CurOpcode));
for (auto BI : CurrentPacketMIs) {
  if (HII->isDotCurInst(*BI)) {
    for (auto &MO : BI->operands())

MI->setDesc(HII->get(HII->getNonDotCurOp(*MI)));
if (!HII->isHVXVec(MI))
if (!HII->isHVXVec(*MII))

if (HII->isDotCurInst(MI) && !HII->mayBeCurLoad(MI))
if (!HII->mayBeCurLoad(MI))

dbgs() << "in packet\n";);
dbgs() << "Checking CUR against ";

bool FoundMatch = false;
for (auto &MO : MJ.operands())
  if (MO.isReg() && MO.getReg() == DestReg)

for (auto BI : CurrentPacketMIs) {
if (RC == &Hexagon::PredRegsRegClass)
  NewOpcode = HII->getDotNewPredOp(MI, MBPI);
else
  NewOpcode = HII->getDotNewOp(MI);
MI.setDesc(HII->get(NewOpcode));

int NewOpcode = HII->getDotOldOp(MI);
MI.setDesc(HII->get(NewOpcode));
case Hexagon::S2_storerd_io:
case Hexagon::S2_storeri_io:
case Hexagon::S2_storerh_io:
case Hexagon::S2_storerb_io:

if (HII->isValidOffset(Opc, NewOff, HRI)) {

case Hexagon::S2_storerd_io:
case Hexagon::S2_storeri_io:
case Hexagon::S2_storerh_io:
case Hexagon::S2_storerb_io:
if (!HII->getBaseAndOffsetPosition(MI, BPI, OPI))
if (!HII->getBaseAndOffsetPosition(MJ, BPJ, OPJ))

unsigned Reg = MI.getOperand(BPI).getReg();

for (const auto &PI : SUI->Preds)

if (!HII->getIncrementValue(MJ, Incr))

int64_t Offset = MI.getOperand(OPI).getImm();
if (!HII->isValidOffset(MI.getOpcode(), Offset+Incr, HRI))

MI.getOperand(OPI).setImm(Offset + Incr);

if (!HII->getBaseAndOffsetPosition(MI, BP, OP))
if (MO.isReg() && MO.isDef())
  DefRegsSet.insert(MO.getReg());

if (MO.isReg() && MO.isUse() && DefRegsSet.count(MO.getReg()))

assert(Op1.isReg() && "Post increment operand has to be a register.");

assert(Op0.isReg() && "Post increment operand has to be a register.");

llvm_unreachable("mayLoad or mayStore not set for Post Increment operation");
case Hexagon::L4_loadrd_ap:
case Hexagon::L4_loadrb_ap:
case Hexagon::L4_loadrh_ap:
case Hexagon::L4_loadrub_ap:
case Hexagon::L4_loadruh_ap:
case Hexagon::L4_loadri_ap:
if (!HII->mayBeNewStore(MI))

if (PacketRC == &Hexagon::DoubleRegsRegClass)

for (auto I : CurrentPacketMIs) {
  SUnit *PacketSU = MIToSUnit.find(I)->second;

if (HII->isPostIncrement(MI) &&

if (HII->isPostIncrement(PacketMI) && PacketMI.mayLoad() &&

if (HII->isPredicated(PacketMI)) {
  if (!HII->isPredicated(MI))
unsigned predRegNumSrc = 0;
unsigned predRegNumDst = 0;

for (auto &MO : PacketMI.operands()) {
  predRegNumSrc = MO.getReg();
  predRegClass = HRI->getMinimalPhysRegClass(predRegNumSrc);
  if (predRegClass == &Hexagon::PredRegsRegClass)

assert((predRegClass == &Hexagon::PredRegsRegClass) &&
       "predicate register not found in a predicated PacketMI instruction");

  predRegNumDst = MO.getReg();
  predRegClass = HRI->getMinimalPhysRegClass(predRegNumDst);
  if (predRegClass == &Hexagon::PredRegsRegClass)

assert((predRegClass == &Hexagon::PredRegsRegClass) &&
       "predicate register not found in a predicated MI instruction");

if (predRegNumDst != predRegNumSrc ||
    HII->isDotNewInst(PacketMI) != HII->isDotNewInst(MI) ||
unsigned StartCheck = 0;

for (auto I : CurrentPacketMIs) {
  SUnit *TempSU = MIToSUnit.find(I)->second;

  if (&TempMI != &PacketMI && !StartCheck)
  if (&TempMI == &PacketMI)

if (!HII->isPostIncrement(MI)) {
  for (unsigned opNum = 0; opNum < MI.getNumOperands()-1; opNum++) {

for (auto &MO : PacketMI.operands()) {
  if (MO.isRegMask() && MO.clobbersPhysReg(DepReg))
  if (!MO.isReg() || !MO.isDef() || !MO.isImplicit())
  unsigned R = MO.getReg();
  if (R == DepReg || HRI->isSuperRegister(DepReg, R))

if (MO.isReg() && MO.isUse() && MO.isImplicit() && MO.getReg() == DepReg)
                         const SUnit *PacketSU, unsigned DepReg,

if (!HII->mayBeNewStore(MI))

if (canPromoteToNewValueStore(MI, PacketMI, DepReg))

if (CheckDef && MO.isRegMask() && MO.clobbersPhysReg(DepReg))
if (!MO.isReg() || MO.getReg() != DepReg || !MO.isImplicit())
if (CheckDef == MO.isDef())

if (HII->isDotNewInst(MI) && !HII->mayBeNewStore(MI))
if (!isNewifiable(MI, RC))

if (RC == &Hexagon::PredRegsRegClass)
  return HII->predCanBeUsedAsDotNew(PI, DepReg);
if (RC != &Hexagon::PredRegsRegClass && !HII->mayBeNewStore(MI))

int NewOpcode = HII->getDotNewOp(MI);

bool ResourcesAvailable = ResourceTracker->canReserveResources(*NewMI);
if (!ResourcesAvailable)

if (!canPromoteToNewValue(MI, PacketSU, DepReg, MII))
SUnit *PacketSUDep = MIToSUnit.find(&MI)->second;

for (auto I : CurrentPacketMIs) {
  if (!HII->isPredicated(*I))
    continue;
  SUnit *PacketSU = MIToSUnit.find(I)->second;
  if (PacketSU->isSucc(PacketSUDep)) {
    for (unsigned i = 0; i < PacketSU->Succs.size(); ++i) {
      auto &Dep = PacketSU->Succs[i];
      if (Dep.getSUnit() == PacketSUDep && Dep.getKind() == SDep::Anti &&
          Dep.getReg() == DepReg)

if (Op.isReg() && Op.getReg() && Op.isUse() &&
    Hexagon::PredRegsRegClass.contains(Op.getReg()))
SUnit *SU = MIToSUnit[&MI1];

for (auto I : CurrentPacketMIs) {
  SUnit *PacketSU = MIToSUnit.find(I)->second;
  if (PacketSU->isSucc(SU)) {
    for (unsigned i = 0; i < PacketSU->Succs.size(); ++i) {
      auto Dep = PacketSU->Succs[i];
      if (Dep.getSUnit() == SU && Dep.getKind() == SDep::Data &&
          Hexagon::PredRegsRegClass.contains(Dep.getReg())) {
        if (restrictingDepExistInPacket(*I, Dep.getReg()))

return PReg1 == PReg2 &&
       Hexagon::PredRegsRegClass.contains(PReg1) &&
       Hexagon::PredRegsRegClass.contains(PReg2) &&
       HII->isDotNewInst(MI1) == HII->isDotNewInst(MI2);
PromotedToDotNew = false;
GlueToNewValueJump = false;
GlueAllocframeStore = false;
FoundSequentialDependence = false;

auto *IS = ResourceTracker->getInstrItins()->beginStage(TID.getSchedClass());
unsigned FuncUnits = IS->getUnits();

if (HII->isSolo(MI))
case Hexagon::S2_storew_locked:
case Hexagon::S4_stored_locked:
case Hexagon::L2_loadw_locked:
case Hexagon::L4_loadd_locked:
case Hexagon::Y2_dccleana:
case Hexagon::Y2_dccleaninva:
case Hexagon::Y2_dcinva:
case Hexagon::Y2_dczeroa:
case Hexagon::Y4_l2fetch:
case Hexagon::Y5_l2fetch: {

unsigned TJ = HII.getType(MJ);
for (auto &B : MF) {
  for (auto I = B.instr_begin(), E = B.instr_end(); I != E; I = NextI) {
    NextI = std::next(I);

    bool InsertBeforeBundle;
      InsertBeforeBundle = true;

    BundleIt = moveInstrOut(MI, BundleIt, InsertBeforeBundle);
case Hexagon::Y2_barrier:
case Hexagon::Y2_dcfetchbo:
case Hexagon::Y4_l2fetch:
case Hexagon::Y5_l2fetch:
if (HII->isPredicated(I) || HII->isPredicated(J))

BitVector DeadDefs(Hexagon::NUM_TARGET_REGS);
if (!MO.isReg() || !MO.isDef() || !MO.isDead())
DeadDefs[MO.getReg()] = true;

if (!MO.isReg() || !MO.isDef() || !MO.isDead())
unsigned R = MO.getReg();
if (R != Hexagon::USR_OVF && DeadDefs[R])
if ((HII->isSaveCalleeSavedRegsCall(I) &&
    (HII->isSaveCalleeSavedRegsCall(J) &&

if (MI.isCall() || HII->isDeallocRet(MI) || HII->isNewValueJump(MI))
if (HII->isPredicated(MI) && HII->isPredicatedNew(MI) && HII->isJumpR(MI))

if (HII->isLoopN(I) && isBadForLoopN(J))
if (HII->isLoopN(J) && isBadForLoopN(I))

return HII->isDeallocRet(I) &&
if (!OpJ.isRegMask())
assert((J.isCall() || HII->isTailCall(J)) && "Regmask on a non-call");

if (OpJ.clobbersPhysReg(OpI.getReg()))
} else if (OpI.isRegMask()) {

if ((SysI && StoreJ) || (SysJ && StoreI))

if (StoreI && StoreJ) {
  if (HII->isNewValueInst(J) || HII->isMemOp(J) || HII->isMemOp(I))

bool MopStI = HII->isMemOp(I) || StoreI;
bool MopStJ = HII->isMemOp(J) || StoreJ;
if (MopStI && MopStJ)

return (StoreJ && HII->isDeallocRet(I)) || (StoreI && HII->isDeallocRet(J));
if (CurrentPacketMIs.size() == 1)
  IgnoreDepMIs.clear();

assert(!isSoloInstruction(I) && "Unexpected solo instr!");

if (cannotCoexist(I, J))

Dependence = hasDeadDependence(I, J) || hasControlDependence(I, J);
if (NextMII != I.getParent()->end() && HII->isNewValueJump(*NextMII)) {

  bool secondRegMatch = false;

  if (NOp1.isReg() && I.getOperand(0).getReg() == NOp1.getReg())
    secondRegMatch = true;

  if (PI->getOpcode() == Hexagon::S2_allocframe || PI->mayStore() ||
      HII->isLoopN(*PI)) {

  if (OpR.isReg() && PI->modifiesRegister(OpR.getReg(), HRI)) {

  GlueToNewValueJump = true;
for (unsigned i = 0; i < SUJ->Succs.size(); ++i) {
  if (FoundSequentialDependence)
    break;

  if (SUJ->Succs[i].getSUnit() != SUI)
    continue;

  unsigned DepReg = 0;
    DepReg = SUJ->Succs[i].getReg();
    RC = HRI->getMinimalPhysRegClass(DepReg);

  if (I.isCall() || HII->isJumpR(I) || I.isReturn() || HII->isTailCall(I)) {
    if (!isCallDependent(I, DepType, SUJ->Succs[i].getReg()))

  if (canPromoteToDotCur(J, SUJ, DepReg, II, RC))
    if (promoteToDotCur(J, DepType, II, RC))

  if (DepType == SDep::Data && HII->isDotCurInst(J)) {
    if (HII->isHVXVec(I))
if (canPromoteToDotNew(I, SUJ, DepReg, II, RC)) {
  if (promoteToDotNew(I, DepType, II, RC)) {
    PromotedToDotNew = true;
    if (cannotCoexist(I, J))
      FoundSequentialDependence = true;

if (HII->isNewValueJump(I))

if (HII->isPredicated(I) && HII->isPredicated(J) &&
    arePredicatesComplements(I, J)) {

auto Itr = find(IgnoreDepMIs, &J);
if (Itr != IgnoreDepMIs.end()) {

IgnoreDepMIs.push_back(&I);

if (I.isConditionalBranch() && DepType != SDep::Data &&

  FoundSequentialDependence = true;

FoundSequentialDependence = true;
bool LoadI = I.mayLoad(), StoreI = I.mayStore();
bool NVStoreJ = HII->isNewValueStore(J);
bool NVStoreI = HII->isNewValueStore(I);
bool IsVecJ = HII->isHVXVec(J);
bool IsVecI = HII->isHVXVec(I);

    ((LoadJ && StoreI && !NVStoreI) ||
     (StoreJ && LoadI && !NVStoreJ)) &&
    (J.getOpcode() != Hexagon::S2_allocframe &&
     I.getOpcode() != Hexagon::S2_allocframe) &&
    (J.getOpcode() != Hexagon::L2_deallocframe &&
     I.getOpcode() != Hexagon::L2_deallocframe) &&
    (!HII->isMemOp(J) && !HII->isMemOp(I)) && (!IsVecJ && !IsVecI))
  setmemShufDisabled(true);

if (StoreJ && LoadI && alias(J, I)) {
  FoundSequentialDependence = true;

if (!LoadJ || (!LoadI && !StoreI)) {
  FoundSequentialDependence = true;
unsigned Opc = I.getOpcode();
switch (Opc) {
case Hexagon::S2_storerd_io:
case Hexagon::S2_storeri_io:
case Hexagon::S2_storerh_io:
case Hexagon::S2_storerb_io:
  if (I.getOperand(0).getReg() == HRI->getStackRegister()) {

GlueAllocframeStore = useCallersSP(I);
if (GlueAllocframeStore)
if (Op.isReg() && Op.isDef()) {
  unsigned R = Op.getReg();
} else if (!Op.isRegMask()) {

  FoundSequentialDependence = true;

FoundSequentialDependence = true;
if (FoundSequentialDependence) {

bool Coexist = !cannotCoexist(I, J);

if (PromotedToDotNew)

if (GlueAllocframeStore) {
  GlueAllocframeStore = false;

undoChangedOffset(I);

if (GlueToNewValueJump) {
  GlueToNewValueJump = false;

if (ChangedOffset == INT64_MAX && updateOffset(SUI, SUJ)) {
  FoundSequentialDependence = false;
bool FoundLoad = false;
bool FoundStore = false;

for (auto MJ : CurrentPacketMIs) {
  unsigned Opc = MJ->getOpcode();
  if (Opc == Hexagon::S2_allocframe || Opc == Hexagon::L2_deallocframe)
    continue;
  if (HII->isMemOp(*MJ))
  if (MJ->mayStore() && !HII->isNewValueStore(*MJ))
    FoundStore = true;

return FoundLoad && FoundStore;
if (CurrentPacketMIs.empty())
  PacketStalls = false;
PacketStalls |= producesStall(MI);

CurrentPacketMIs.push_back(&MI);

assert(ResourceTracker->canReserveResources(MI));

bool ExtMI = HII->isExtended(MI) || HII->isConstExtended(MI);

if (GlueToNewValueJump) {

ResourceTracker->reserveResources(MI);

Good = tryAllocateResourcesForConstExt(true);
bool ExtNvjMI = HII->isExtended(NvjMI) || HII->isConstExtended(NvjMI);

if (ResourceTracker->canReserveResources(NvjMI))
  ResourceTracker->reserveResources(NvjMI);

if (Good && ExtNvjMI)
  Good = tryAllocateResourcesForConstExt(true);

assert(ResourceTracker->canReserveResources(MI));
ResourceTracker->reserveResources(MI);

assert(canReserveResourcesForConstExt());
tryAllocateResourcesForConstExt(true);

assert(ResourceTracker->canReserveResources(NvjMI));
ResourceTracker->reserveResources(NvjMI);

assert(canReserveResourcesForConstExt());
reserveResourcesForConstExt();

CurrentPacketMIs.push_back(&MI);
CurrentPacketMIs.push_back(&NvjMI);

ResourceTracker->reserveResources(MI);
if (ExtMI && !tryAllocateResourcesForConstExt(true)) {

if (PromotedToDotNew)

if (GlueAllocframeStore) {
  GlueAllocframeStore = false;

ResourceTracker->reserveResources(MI);
reserveResourcesForConstExt();

CurrentPacketMIs.push_back(&MI);
bool memShufDisabled = getmemShufDisabled();
if (memShufDisabled && !foundLSInPacket()) {
  setmemShufDisabled(false);

memShufDisabled = getmemShufDisabled();

OldPacketMIs.clear();
for (auto &I : make_range(HII->expandVGatherPseudo(*MI), NextMI))
  OldPacketMIs.push_back(&I);

CurrentPacketMIs.clear();

if (OldPacketMIs.size() > 1) {
  auto BundleMII = std::prev(FirstMI);
  if (memShufDisabled)
    HII->setBundleNoShuf(BundleMII);

  setmemShufDisabled(false);

ResourceTracker->clearResources();

return !producesStall(MI);
if (!OldPacketMIs.empty()) {
  auto *OldBB = OldPacketMIs.front()->getParent();
  if (MLI->getLoopFor(OldBB) != MLI->getLoopFor(ThisBB))

for (auto J : CurrentPacketMIs) {
  SUnit *SUJ = MIToSUnit[J];
  for (auto &Pred : SUI->Preds)
    if (Pred.getSUnit() == SUJ)
      if ((Pred.getLatency() == 0 && Pred.isAssignedRegDep()) ||
          HII->isNewValueJump(I) || HII->isToBeScheduledASAP(*J, I))

for (auto J : OldPacketMIs) {
  SUnit *SUJ = MIToSUnit[J];
  for (auto &Pred : SUI->Preds)
    if (Pred.getSUnit() == SUJ && Pred.getLatency() > 1)

return new HexagonPacketizer(Minimal);
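createHexagonPacketizer() is the factory used to put this pass into the Hexagon codegen pipeline. A hedged sketch of that registration follows; the class name HexagonPassConfig and the exact hook are assumptions based on the usual TargetPassConfig pattern, not taken from this listing:

  // Hedged sketch: add the packetizer late in the pipeline, running a
  // minimal version at -O0. The real target code may add further guards.
  void HexagonPassConfig::addPreEmitPass() {
    bool Minimal = (getOptLevel() == CodeGenOpt::None);
    addPass(createHexagonPacketizer(Minimal));
  }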
bool canPromoteToNewValueStore(const MachineInstr &MI, const MachineInstr &PacketMI, unsigned DepReg)
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
static bool hasWriteToReadDep(const MachineInstr &FirstI, const MachineInstr &SecondI, const TargetRegisterInfo *TRI)
bool modifiesRegister(unsigned Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register...
bool isCall(QueryType Type=AnyInBundle) const
instr_iterator instr_end()
bool canPromoteToDotNew(const MachineInstr &MI, const SUnit *PacketSU, unsigned DepReg, MachineBasicBlock::iterator &MII, const TargetRegisterClass *RC)
static bool cannotCoexistAsymm(const MachineInstr &MI, const MachineInstr &MJ, const HexagonInstrInfo &HII)
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
bool isCFIInstruction() const
bool isBundledWithPred() const
Return true if this instruction is part of a bundle, and it is not the first instruction in the bundl...
Describe properties that are true of each instruction in the target description file.
unsigned getReg() const
getReg - Returns the register number.
static bool isImplicitDependency(const MachineInstr &I, bool CheckDef, unsigned DepReg)
bool demoteToDotOld(MachineInstr &MI)
static cl::opt< bool > PacketizeVolatiles("hexagon-packetize-volatiles", cl::ZeroOrMore, cl::Hidden, cl::init(true), cl::desc("Allow non-solo packetization of volatile memory references"))
void undoChangedOffset(MachineInstr &MI)
Undo the changed offset.
static cl::opt< bool > DisablePacketizer("disable-packetizer", cl::Hidden, cl::ZeroOrMore, cl::init(false), cl::desc("Disable Hexagon packetizer pass"))
INITIALIZE_PASS_BEGIN(HexagonPacketizer, "hexagon-packetizer", "Hexagon Packetizer", false, false)
INITIALIZE_PASS_END(HexagonPacketizer, "hexagon-packetizer", "Hexagon Packetizer", false, false)
const TargetRegisterInfo * TRI
Kind
These are the different kinds of scheduling dependencies.
iterator_range< mop_iterator > operands()
MachineInstr * CreateMachineInstr(const MCInstrDesc &MCID, const DebugLoc &DL, bool NoImp=false)
CreateMachineInstr - Allocate a new MachineInstr.
#define HEXAGON_LRFP_SIZE
SmallVector< SDep, 4 > Preds
All sunit predecessors.
A register anti-dependence (aka WAR).
bool restrictingDepExistInPacket(MachineInstr &, unsigned)
bool canPromoteToDotCur(const MachineInstr &MI, const SUnit *PacketSU, unsigned DepReg, MachineBasicBlock::iterator &MII, const TargetRegisterClass *RC)
AnalysisUsage & addRequired()
#define INITIALIZE_PASS_DEPENDENCY(depName)
static cl::opt< bool > EnableGenAllInsnClass("enable-gen-insn", cl::init(false), cl::Hidden, cl::ZeroOrMore, cl::desc("Generate all instruction with TC"))
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
unsigned getNumOperands() const
Returns the total number of operands.
cl::opt< bool > Slot1Store("slot1-store-slot0-load", cl::Hidden, cl::ZeroOrMore, cl::init(true), cl::desc("Allow slot1 store and slot0 load"))
bool isBundledWithSucc() const
Return true if this instruction is part of a bundle, and it is not the last instruction in the bundle...
bool shouldAddToPacket(const MachineInstr &MI) override
Regular data dependence (aka true-dependence).
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
void endPacket(MachineBasicBlock *MBB, MachineBasicBlock::iterator MI) override
bool arePredicatesComplements(MachineInstr &MI1, MachineInstr &MI2)
static bool doesModifyCalleeSavedReg(const MachineInstr &MI, const TargetRegisterInfo *TRI)
Returns true if the instruction modifies a callee-saved register.
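A minimal sketch of what such a check usually looks like, built only from the getCalleeSavedRegs() and modifiesRegister() interfaces listed on this page; the loop structure is an assumption, not a quote of this file:

  // Hedged sketch: walk the target's null-terminated callee-saved register
  // list and report whether MI writes any register in it.
  static bool doesModifyCalleeSavedReg(const MachineInstr &MI,
                                       const TargetRegisterInfo *TRI) {
    const MachineFunction &MF = *MI.getParent()->getParent();
    for (const MCPhysReg *CSR = TRI->getCalleeSavedRegs(&MF); CSR && *CSR; ++CSR)
      if (MI.modifiesRegister(*CSR, TRI))
        return true;
    return false;
  }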
void unbundleFromPred()
Break bundle above this instruction.
A register output-dependence (aka WAW).
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
static bool isControlFlow(const MachineInstr &MI)
void initializeHexagonPacketizerPass(PassRegistry &)
instr_iterator getInstrIterator() const
bool isInsideBundle() const
Return true if MI is in a bundle (but not the first MI in a bundle).
virtual const MCPhysReg * getCalleeSavedRegs(const MachineFunction *MF) const =0
Return a null-terminated list of all of the callee-saved registers on this target.
bool hasDualStoreDependence(const MachineInstr &I, const MachineInstr &J)
bool isBranch(QueryType Type=AnyInBundle) const
Returns true if this is a conditional, unconditional, or indirect branch.
static MachineBasicBlock::iterator moveInstrOut(MachineInstr &MI, MachineBasicBlock::iterator BundleIt, bool Before)
static unsigned getPredicatedRegister(MachineInstr &MI, const HexagonInstrInfo *QII)
Gets the predicate register of a predicated instruction.
static const MachineOperand & getAbsSetOperand(const MachineInstr &MI)
bool isSoloInstruction(const MachineInstr &MI) override
unsigned getSchedClass() const
Return the scheduling class for this instruction.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
initializer< Ty > init(const Ty &Val)
MachineInstr * getInstr() const
Returns the representative MachineInstr for this SUnit.
static bool isSystemInstr(const MachineInstr &MI)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
std::pair< iterator, bool > insert(const ValueT &V)
Represent the analysis usage information of a pass.
void clearFlag(MIFlag Flag)
clearFlag - Clear a MI flag.
void setImm(int64_t immVal)
FunctionPass class - This class is used to implement most global optimizations.
self_iterator getIterator()
static bool isRegDependence(const SDep::Kind DepType)
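Given the SDep kinds described on this page (Data, Anti, Output, plus plain ordering edges), a plausible sketch of this predicate is the following; the exact body in the file may differ:

  // Hedged sketch: only register-carried dependence kinds count; a plain
  // Order dependence does not name a register.
  static bool isRegDependence(const SDep::Kind DepType) {
    return DepType == SDep::Data || DepType == SDep::Anti ||
           DepType == SDep::Output;
  }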
bool tryAllocateResourcesForConstExt(bool Reserve)
void DeleteMachineInstr(MachineInstr *MI)
DeleteMachineInstr - Delete the given MachineInstr.
bool isImplicitDef() const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Any other ordering dependency.
auto find(R &&Range, const T &Val) -> decltype(adl_begin(Range))
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly...
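This is the wrapper used above when the packetizer looks J up in IgnoreDepMIs; a minimal usage sketch (the action taken on a hit is illustrative, not from this file):

  // Hedged sketch: range-based find over the ignore list.
  auto Itr = llvm::find(IgnoreDepMIs, &J);
  if (Itr != IgnoreDepMIs.end()) {
    // J was recorded earlier; the remembered dependence is handled here.
  }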
bool isPostIncrement(const MachineInstr &MI) const override
Return true for post-incremented instructions.
bool isDebugInstr() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) override
bool isPredicated(const MachineInstr &MI) const override
Returns true if the instruction is already predicated.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
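endPacket above uses make_range to walk the just-finished packet; a generic usage sketch over a basic block's instruction iterators (the filtering shown is only an example):

  // Hedged sketch: iterate the half-open range [instr_begin, instr_end).
  for (MachineInstr &MI : make_range(B.instr_begin(), B.instr_end()))
    if (MI.isBundledWithPred())
      continue; // e.g. skip instructions that are not the head of a bundle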
static const MachineOperand & getPostIncrementOperand(const MachineInstr &MI, const HexagonInstrInfo *HII)
Iterator for intrusive lists based on ilist_node.
void setDesc(const MCInstrDesc &tid)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one...
bool isNewValueStore(const MachineInstr &MI) const
bool hasV60OpsOnly() const
bool isDebugValue() const
MachineOperand class - Representation of each machine instruction operand.
uint64_t getType(const MachineInstr &MI) const
bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
static cl::opt< bool > DisableVecDblNVStores("disable-vecdbl-nv-stores", cl::init(false), cl::Hidden, cl::ZeroOrMore, cl::desc("Disable vector double new-value-stores"))
void setPreservesCFG()
This function should be called by the pass, iff they do not:
const Function & getFunction() const
Return the LLVM function that this machine code represents.
void initPacketizerState() override
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
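With DEBUG_TYPE set to "packets" at the top of the file, debug prints such as the fragments above are normally wrapped in LLVM_DEBUG so they only fire under -debug-only=packets. A small sketch; the exact messages in this file vary:

  // Hedged sketch: conditional debug output through dbgs().
  LLVM_DEBUG(dbgs() << "Checking CUR against " << MJ);
  LLVM_DEBUG(dbgs() << "in packet\n");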
bool canReserveResourcesForConstExt()
MachineBasicBlock::iterator addToPacket(MachineInstr &MI) override
bool readsRegister(unsigned Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr reads the specified register.
bool mayStore() const
Return true if this instruction could possibly modify memory.
static const MachineOperand & getStoreValueOperand(const MachineInstr &MI)
static bool isLoadAbsSet(const MachineInstr &MI)
static PredicateKind getPredicateSense(const MachineInstr &MI, const HexagonInstrInfo *HII)
Returns true if an instruction is predicated on p0 and false if it's predicated on !p0...
const MachineBasicBlock * getParent() const
MachineFunctionProperties & set(Property P)
Representation of each machine instruction.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
static bool isSchedBarrier(const MachineInstr &MI)
void reserveResourcesForConstExt()
bool promoteToDotCur(MachineInstr &MI, SDep::Kind DepType, MachineBasicBlock::iterator &MII, const TargetRegisterClass *RC)
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
bool isCallDependent(const MachineInstr &MI, SDep::Kind DepType, unsigned DepReg)
bool ignorePseudoInstruction(const MachineInstr &MI, const MachineBasicBlock *MBB) override
bool isNewifiable(const MachineInstr &MI, const TargetRegisterClass *NewRC)
bool updateOffset(SUnit *SUI, SUnit *SUJ)
Return true if we can update the offset in MI so that MI and MJ can be packetized together...
cl::opt< bool > ScheduleInlineAsm
bool isCall() const
Return true if the instruction is a call.
bool isPredicatedTrue(const MachineInstr &MI) const
bool hasControlDependence(const MachineInstr &I, const MachineInstr &J)
bool useCallersSP(MachineInstr &MI)
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
bool promoteToDotNew(MachineInstr &MI, SDep::Kind DepType, MachineBasicBlock::iterator &MII, const TargetRegisterClass *RC)
bool hasDeadDependence(const MachineInstr &I, const MachineInstr &J)
bool isHVXMemWithAIndirect(const MachineInstr &I, const MachineInstr &J) const
bool isTerminator() const
Returns true if this instruction part of the terminator for a basic block.
bool cannotCoexist(const MachineInstr &MI, const MachineInstr &MJ)
bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) override
FunctionPass * createHexagonPacketizer(bool Minimal)
const HexagonInstrInfo * getInstrInfo() const override
SmallVector< SDep, 4 > Succs
All sunit successors.
void useCalleesSP(MachineInstr &MI)
bool hasRegMaskDependence(const MachineInstr &I, const MachineInstr &J)
bool isBarrier(QueryType Type=AnyInBundle) const
Returns true if the specified instruction stops control flow from executing the instruction immediate...
StringRef - Represent a constant reference to a string, i.e.
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
void unpacketizeSoloInstrs(MachineFunction &MF)
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object...
Dependence - This class represents a dependence between two memory references in a function...
const MachineOperand & getOperand(unsigned i) const
void finalizeBundle(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
finalizeBundle - Finalize a machine instruction bundle which includes a sequence of instructions star...
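endPacket above calls this when a finished packet holds more than one instruction. A hedged sketch of that call; MBB is the block parameter of endPacket, while EndMI (the first instruction after the packet) is an assumed name:

  // Hedged sketch: turn the instructions of a finished packet into one bundle.
  if (OldPacketMIs.size() > 1) {
    MachineBasicBlock::instr_iterator FirstMI(OldPacketMIs.front()->getIterator());
    MachineBasicBlock::instr_iterator LastMI(EndMI->getIterator()); // EndMI: assumed
    finalizeBundle(*MBB, FirstMI, LastMI);
  }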
bool canPromoteToNewValue(const MachineInstr &MI, const SUnit *PacketSU, unsigned DepReg, MachineBasicBlock::iterator &MII)
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects...
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
bool isSucc(const SUnit *N) const
Tests if node N is a successor of this node.
static bool isDirectJump(const MachineInstr &MI)
bool producesStall(const MachineInstr &MI)
Properties which a MachineFunction may have at a given point in time.
Scheduling unit. This is a node in the scheduling DAG.