#define DEBUG_TYPE "hexagon-instrinfo"

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRMAP_INFO
#include "HexagonGenDFAPacketizer.inc"
#include "HexagonGenInstrInfo.inc"

    "packetization boundary."));

    cl::desc("Disable schedule adjustment for new value stores."));

    cl::desc("Enable timing class latency"));

    cl::desc("Enable vec alu forwarding"));

    cl::desc("Enable vec acc forwarding"));

    cl::desc("Use the DFA based hazard recognizer."));
void HexagonInstrInfo::anchor() {}

  return (Reg >= Hexagon::R0 && Reg <= Hexagon::R7) ||
         (Reg >= Hexagon::R16 && Reg <= Hexagon::R23);

  for (; MIB != MIE; ++MIB) {
    if (!MIB->isDebugInstr())
  if (EndLoopOp == Hexagon::ENDLOOP0) {
    LOOPi = Hexagon::J2_loop0i;
    LOOPr = Hexagon::J2_loop0r;
  } else { // EndLoopOp == Hexagon::ENDLOOP1
    LOOPi = Hexagon::J2_loop1i;
    LOOPr = Hexagon::J2_loop1r;
  }

    if (!Visited.insert(PB).second)

    for (auto I = PB->instr_rbegin(), E = PB->instr_rend(); I != E; ++I) {
      unsigned Opc = I->getOpcode();
      if (Opc == LOOPi || Opc == LOOPr)

      if (Opc == EndLoopOp && I->getOperand(0).getMBB() != TargetBB)
  case Hexagon::L2_loadri_io:
  case Hexagon::L2_loadrd_io:
  case Hexagon::V6_vL32b_ai:
  case Hexagon::V6_vL32b_nt_ai:
  case Hexagon::V6_vL32Ub_ai:
  case Hexagon::LDriw_pred:
  case Hexagon::LDriw_ctr:
  case Hexagon::PS_vloadrq_ai:
  case Hexagon::PS_vloadrw_ai:
  case Hexagon::PS_vloadrw_nt_ai: {

  case Hexagon::L2_ploadrit_io:
  case Hexagon::L2_ploadrif_io:
  case Hexagon::L2_ploadrdt_io:
  case Hexagon::L2_ploadrdf_io: {

  case Hexagon::S2_storerb_io:
  case Hexagon::S2_storerh_io:
  case Hexagon::S2_storeri_io:
  case Hexagon::S2_storerd_io:
  case Hexagon::V6_vS32b_ai:
  case Hexagon::V6_vS32Ub_ai:
  case Hexagon::STriw_pred:
  case Hexagon::STriw_ctr:
  case Hexagon::PS_vstorerq_ai:
  case Hexagon::PS_vstorerw_ai: {

  case Hexagon::S2_pstorerbt_io:
  case Hexagon::S2_pstorerbf_io:
  case Hexagon::S2_pstorerht_io:
  case Hexagon::S2_pstorerhf_io:
  case Hexagon::S2_pstorerit_io:
  case Hexagon::S2_pstorerif_io:
  case Hexagon::S2_pstorerdt_io:
  case Hexagon::S2_pstorerdf_io: {
  for (++MII; MII != MBB->instr_end() && MII->isInsideBundle(); ++MII)

  for (++MII; MII != MBB->instr_end() && MII->isInsideBundle(); ++MII)

                                     bool AllowModify) const {

  while (I->isDebugInstr()) {

  bool JumpToBlock = I->getOpcode() == Hexagon::J2_jump &&
                     I->getOperand(0).isMBB();

  if (AllowModify && JumpToBlock &&

    I->eraseFromParent();

  if (!isUnpredicatedTerminator(*I))

  if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(*I)) {
    SecondLastInst = &*I;

  int SecLastOpcode = SecondLastInst ? SecondLastInst->getOpcode() : 0;

  if (LastOpcode == Hexagon::J2_jump && !LastInst->getOperand(0).isMBB())

  if (SecLastOpcode == Hexagon::J2_jump &&

  if (LastInst && !SecondLastInst) {
    if (LastOpcode == Hexagon::J2_jump) {

    if (LastOpcodeHasJMP_c) {
                        << " with one jump\n";);

  if (SecLastOpcodeHasJMP_c && (LastOpcode == Hexagon::J2_jump)) {

  if (SecLastOpcodeHasNVJump &&
      (LastOpcode == Hexagon::J2_jump)) {

  if (SecLastOpcode == Hexagon::J2_jump && LastOpcode == Hexagon::J2_jump) {
    I->eraseFromParent();

  if (isEndLoopN(SecLastOpcode) && LastOpcode == Hexagon::J2_jump) {
                        << " with two jumps";);

                                        int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  while (I != MBB.begin()) {
    if (I->isDebugInstr())

  if (Count && (I->getOpcode() == Hexagon::J2_jump))
                                        int *BytesAdded) const {
  unsigned BOpc = Hexagon::J2_jump;
  unsigned BccOpc = Hexagon::J2_jumpt;

  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert(!BytesAdded && "code size not handled");

  if (!Cond.empty() && Cond[0].isImm())
    BccOpc = Cond[0].getImm();

      int EndLoopOp = Cond[0].getImm();

      assert(Loop != nullptr && "Inserting an ENDLOOP without a LOOP");

      assert((Cond.size() == 3) && "Only supporting rr/ri version of nvjump");

      if (Cond[2].isReg()) {

      } else if (Cond[2].isImm()) {
          addImm(Cond[2].getImm()).addMBB(TBB);

      assert((Cond.size() == 2) && "Malformed cond vector");

         "Cond. cannot be empty when multiple branchings are required");

         "NV-jump cannot be inserted with another branch");

    int EndLoopOp = Cond[0].getImm();

    assert(Loop != nullptr && "Inserting an ENDLOOP without a LOOP");
  IndVarInst = nullptr;

                                      unsigned Iter, unsigned MaxIter) const {

         && "Expecting a hardware loop");

  if (Loop->getOpcode() == Hexagon::J2_loop0i ||
      Loop->getOpcode() == Hexagon::J2_loop1i) {

    assert(Loop->getOpcode() == Hexagon::J2_loop0r && "Unexpected instruction");

        addReg(LoopCount).addImm(1);

        addReg(LoopCount).addImm(-1);

       E = PrevInsts.end(); I != E; ++I)
    (*I)->substituteRegister(LoopCount, NewLoopCount, 0, HRI);

  BuildMI(&MBB, DL, get(Hexagon::J2_loop0r)).
                                      unsigned NumCycles, unsigned ExtraPredCycles,

  return NumInstrs <= 4;

                                   const DebugLoc &DL, unsigned DestReg,
                                   unsigned SrcReg, bool KillSrc) const {
  if (Hexagon::IntRegsRegClass.contains(SrcReg, DestReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::A2_tfr), DestReg)
      .addReg(SrcReg, KillFlag);

  if (Hexagon::DoubleRegsRegClass.contains(SrcReg, DestReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::A2_tfrp), DestReg)
      .addReg(SrcReg, KillFlag);

  if (Hexagon::PredRegsRegClass.contains(SrcReg, DestReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::C2_or), DestReg)

  if (Hexagon::CtrRegsRegClass.contains(DestReg) &&
      Hexagon::IntRegsRegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::A2_tfrrcr), DestReg)
      .addReg(SrcReg, KillFlag);

  if (Hexagon::IntRegsRegClass.contains(DestReg) &&
      Hexagon::CtrRegsRegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::A2_tfrcrr), DestReg)
      .addReg(SrcReg, KillFlag);

  if (Hexagon::ModRegsRegClass.contains(DestReg) &&
      Hexagon::IntRegsRegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::A2_tfrrcr), DestReg)
      .addReg(SrcReg, KillFlag);

  if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
      Hexagon::IntRegsRegClass.contains(DestReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg)
      .addReg(SrcReg, KillFlag);

  if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
      Hexagon::PredRegsRegClass.contains(DestReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::C2_tfrrp), DestReg)
      .addReg(SrcReg, KillFlag);

  if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
      Hexagon::IntRegsRegClass.contains(DestReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg)
      .addReg(SrcReg, KillFlag);

  if (Hexagon::HvxVRRegClass.contains(SrcReg, DestReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::V6_vassign), DestReg)
      .addReg(SrcReg, KillFlag);

  if (Hexagon::HvxWRRegClass.contains(SrcReg, DestReg)) {
    unsigned LoSrc = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
    unsigned HiSrc = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
    BuildMI(MBB, I, DL, get(Hexagon::V6_vcombine), DestReg)

  if (Hexagon::HvxQRRegClass.contains(SrcReg, DestReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and), DestReg)
      .addReg(SrcReg, KillFlag);

  if (Hexagon::HvxQRRegClass.contains(SrcReg) &&
      Hexagon::HvxVRRegClass.contains(DestReg)) {

  if (Hexagon::HvxQRRegClass.contains(DestReg) &&
      Hexagon::HvxVRRegClass.contains(SrcReg)) {
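// Illustrative sketch only, not part of the original file. copyPhysReg above picks
// the transfer opcode (A2_tfr, A2_tfrp, C2_tfrpr, V6_vassign, ...) purely from the
// register classes of the two physical registers; a hypothetical caller just asks
// for a copy and never names an opcode:
static void emitR0ToR1Copy(const HexagonInstrInfo &TII, MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator At, const DebugLoc &DL) {
  // Both registers are in IntRegs, so this expands to "r1 = r0" (A2_tfr).
  TII.copyPhysReg(MBB, At, DL, Hexagon::R1, Hexagon::R0, /*KillSrc=*/false);
}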
  if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::S2_storeri_io))
  } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::S2_storerd_io))
  } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::STriw_pred))
  } else if (Hexagon::ModRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::STriw_ctr))
  } else if (Hexagon::HvxQRRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::PS_vstorerq_ai))
  } else if (Hexagon::HvxVRRegClass.hasSubClassEq(RC)) {
    unsigned Opc = SlotAlign < RegAlign ? Hexagon::V6_vS32Ub_ai
                                        : Hexagon::V6_vS32b_ai;
  } else if (Hexagon::HvxWRRegClass.hasSubClassEq(RC)) {
    unsigned Opc = SlotAlign < RegAlign ? Hexagon::PS_vstorerwu_ai
                                        : Hexagon::PS_vstorerw_ai;
  if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::L2_loadri_io), DestReg)
  } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::L2_loadrd_io), DestReg)
  } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::LDriw_pred), DestReg)
  } else if (Hexagon::ModRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::LDriw_ctr), DestReg)
  } else if (Hexagon::HvxQRRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::PS_vloadrq_ai), DestReg)
  } else if (Hexagon::HvxVRRegClass.hasSubClassEq(RC)) {
    unsigned Opc = SlotAlign < RegAlign ? Hexagon::V6_vL32Ub_ai
                                        : Hexagon::V6_vL32b_ai;
    BuildMI(MBB, I, DL, get(Opc), DestReg)
  } else if (Hexagon::HvxWRRegClass.hasSubClassEq(RC)) {
    unsigned Opc = SlotAlign < RegAlign ? Hexagon::PS_vloadrwu_ai
                                        : Hexagon::PS_vloadrw_ai;
    BuildMI(MBB, I, DL, get(Opc), DestReg)
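// Illustrative sketch only, not part of the original file. The two hooks above are
// the spill/reload entry points: they select an aligned or unaligned store/load
// opcode based on the slot's alignment relative to the register class. A
// hypothetical pass spilling an HVX vector register would do roughly:
static int spillHvxReg(MachineFunction &MF, const HexagonInstrInfo &TII,
                       const HexagonRegisterInfo &HRI, MachineBasicBlock &MBB,
                       MachineBasicBlock::iterator At, unsigned VecReg) {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterClass &RC = Hexagon::HvxVRRegClass;
  // Create a frame index sized and aligned for an HVX vector register.
  int FI = MFI.CreateSpillStackObject(HRI.getSpillSize(RC),
                                      HRI.getSpillAlignment(RC));
  TII.storeRegToStackSlot(MBB, At, VecReg, /*isKill=*/true, FI, &RC, &HRI);
  return FI; // Reload later with TII.loadRegFromStackSlot(MBB, At, VecReg, FI, &RC, &HRI).
}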
  auto RealCirc = [&](unsigned Opc, bool HasImm, unsigned MxOp) {

    unsigned CSx = (Mx == Hexagon::M0 ? Hexagon::CS0 : Hexagon::CS1);
    BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrrcr), CSx)

    case TargetOpcode::COPY: {

      std::prev(MBBI)->copyImplicitOps(*MBB.getParent(), MI);

    case Hexagon::PS_aligna:

    case Hexagon::V6_vassignp: {
      BuildMI(MBB, MI, DL, get(Hexagon::V6_vcombine), DstReg)
        .addReg(HRI.getSubReg(SrcReg, Hexagon::vsub_hi), Kill)
        .addReg(HRI.getSubReg(SrcReg, Hexagon::vsub_lo), Kill);
    case Hexagon::V6_lo: {
      unsigned SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);

    case Hexagon::V6_hi: {
      unsigned SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);

    case Hexagon::PS_vstorerw_ai:
    case Hexagon::PS_vstorerwu_ai: {
      bool Aligned = Opc == Hexagon::PS_vstorerw_ai;
      unsigned SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
      unsigned SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
      unsigned NewOpc = Aligned ? Hexagon::V6_vS32b_ai : Hexagon::V6_vS32Ub_ai;
      unsigned Offset = HRI.getSpillSize(Hexagon::HvxVRRegClass);

      BuildMI(MBB, MI, DL, get(NewOpc))

    case Hexagon::PS_vloadrw_ai:
    case Hexagon::PS_vloadrwu_ai: {
      bool Aligned = Opc == Hexagon::PS_vloadrw_ai;
      unsigned NewOpc = Aligned ? Hexagon::V6_vL32b_ai : Hexagon::V6_vL32Ub_ai;
      unsigned Offset = HRI.getSpillSize(Hexagon::HvxVRRegClass);

              HRI.getSubReg(DstReg, Hexagon::vsub_lo))

      BuildMI(MBB, MI, DL, get(NewOpc), HRI.getSubReg(DstReg, Hexagon::vsub_hi))

    case Hexagon::PS_true: {
      BuildMI(MBB, MI, DL, get(Hexagon::C2_orn), Reg)

    case Hexagon::PS_false: {
      BuildMI(MBB, MI, DL, get(Hexagon::C2_andn), Reg)

    case Hexagon::PS_qtrue: {

    case Hexagon::PS_qfalse: {

    case Hexagon::PS_vdd0: {
      BuildMI(MBB, MI, DL, get(Hexagon::V6_vsubw_dv), Vd)

    case Hexagon::PS_vmulw: {
      unsigned Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::isub_hi);
      unsigned Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::isub_lo);
      unsigned Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::isub_hi);
      unsigned Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::isub_lo);

              HRI.getSubReg(DstReg, Hexagon::isub_hi))

              HRI.getSubReg(DstReg, Hexagon::isub_lo))

    case Hexagon::PS_vmulw_acc: {
      unsigned Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::isub_hi);
      unsigned Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::isub_lo);
      unsigned Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::isub_hi);
      unsigned Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::isub_lo);
      unsigned Src3SubHi = HRI.getSubReg(Src3Reg, Hexagon::isub_hi);
      unsigned Src3SubLo = HRI.getSubReg(Src3Reg, Hexagon::isub_lo);

              HRI.getSubReg(DstReg, Hexagon::isub_hi))

              HRI.getSubReg(DstReg, Hexagon::isub_lo))

    case Hexagon::PS_pselect: {
      unsigned Rd = Op0.getReg();
      unsigned Pu = Op1.getReg();
      unsigned Rs = Op2.getReg();
      unsigned Rt = Op3.getReg();

        BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrpt), Rd)
          .addReg(Pu, (Rd == Rt) ? K1 : 0)

        BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrpf), Rd)

    case Hexagon::PS_vselect: {
      unsigned PReg = Op1.getReg();

        auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vcmov))

        auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vncmov))

    case Hexagon::PS_wselect: {
      unsigned PReg = Op1.getReg();

        unsigned SrcLo = HRI.getSubReg(Op2.getReg(), Hexagon::vsub_lo);
        unsigned SrcHi = HRI.getSubReg(Op2.getReg(), Hexagon::vsub_hi);
        auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vccombine))

        unsigned SrcLo = HRI.getSubReg(Op3.getReg(), Hexagon::vsub_lo);
        unsigned SrcHi = HRI.getSubReg(Op3.getReg(), Hexagon::vsub_hi);
        auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vnccombine))
    case Hexagon::PS_tailcall_i:
      MI.setDesc(get(Hexagon::J2_jump));
    case Hexagon::PS_tailcall_r:
    case Hexagon::PS_jmpret:
      MI.setDesc(get(Hexagon::J2_jumpr));
    case Hexagon::PS_jmprett:
      MI.setDesc(get(Hexagon::J2_jumprt));
    case Hexagon::PS_jmpretf:
      MI.setDesc(get(Hexagon::J2_jumprf));
    case Hexagon::PS_jmprettnewpt:
      MI.setDesc(get(Hexagon::J2_jumprtnewpt));
    case Hexagon::PS_jmpretfnewpt:
      MI.setDesc(get(Hexagon::J2_jumprfnewpt));
    case Hexagon::PS_jmprettnew:
      MI.setDesc(get(Hexagon::J2_jumprtnew));
    case Hexagon::PS_jmpretfnew:
      MI.setDesc(get(Hexagon::J2_jumprfnew));
    case Hexagon::PS_loadrub_pci:
      return RealCirc(Hexagon::L2_loadrub_pci, true, 4);
    case Hexagon::PS_loadrb_pci:
      return RealCirc(Hexagon::L2_loadrb_pci, true, 4);
    case Hexagon::PS_loadruh_pci:
      return RealCirc(Hexagon::L2_loadruh_pci, true, 4);
    case Hexagon::PS_loadrh_pci:
      return RealCirc(Hexagon::L2_loadrh_pci, true, 4);
    case Hexagon::PS_loadri_pci:
      return RealCirc(Hexagon::L2_loadri_pci, true, 4);
    case Hexagon::PS_loadrd_pci:
      return RealCirc(Hexagon::L2_loadrd_pci, true, 4);
    case Hexagon::PS_loadrub_pcr:
      return RealCirc(Hexagon::L2_loadrub_pcr, false, 3);
    case Hexagon::PS_loadrb_pcr:
      return RealCirc(Hexagon::L2_loadrb_pcr, false, 3);
    case Hexagon::PS_loadruh_pcr:
      return RealCirc(Hexagon::L2_loadruh_pcr, false, 3);
    case Hexagon::PS_loadrh_pcr:
      return RealCirc(Hexagon::L2_loadrh_pcr, false, 3);
    case Hexagon::PS_loadri_pcr:
      return RealCirc(Hexagon::L2_loadri_pcr, false, 3);
    case Hexagon::PS_loadrd_pcr:
      return RealCirc(Hexagon::L2_loadrd_pcr, false, 3);
    case Hexagon::PS_storerb_pci:
      return RealCirc(Hexagon::S2_storerb_pci, true, 3);
    case Hexagon::PS_storerh_pci:
      return RealCirc(Hexagon::S2_storerh_pci, true, 3);
    case Hexagon::PS_storerf_pci:
      return RealCirc(Hexagon::S2_storerf_pci, true, 3);
    case Hexagon::PS_storeri_pci:
      return RealCirc(Hexagon::S2_storeri_pci, true, 3);
    case Hexagon::PS_storerd_pci:
      return RealCirc(Hexagon::S2_storerd_pci, true, 3);
    case Hexagon::PS_storerb_pcr:
      return RealCirc(Hexagon::S2_storerb_pcr, false, 2);
    case Hexagon::PS_storerh_pcr:
      return RealCirc(Hexagon::S2_storerh_pcr, false, 2);
    case Hexagon::PS_storerf_pcr:
      return RealCirc(Hexagon::S2_storerf_pcr, false, 2);
    case Hexagon::PS_storeri_pcr:
      return RealCirc(Hexagon::S2_storeri_pcr, false, 2);
    case Hexagon::PS_storerd_pcr:
      return RealCirc(Hexagon::S2_storerd_pcr, false, 2);
    case Hexagon::V6_vgathermh_pseudo:
      First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermh))
      BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))

    case Hexagon::V6_vgathermw_pseudo:
      First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermw))
      BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))

    case Hexagon::V6_vgathermhw_pseudo:
      First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermhw))
      BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))

    case Hexagon::V6_vgathermhq_pseudo:
      First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermhq))
      BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))

    case Hexagon::V6_vgathermwq_pseudo:
      First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermwq))
      BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))

    case Hexagon::V6_vgathermhwq_pseudo:
      First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermhwq))
      BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
  assert(Cond[0].isImm() && "First entry in the cond vector not imm-val");
  unsigned opcode = Cond[0].getImm();

  assert(get(opcode).isBranch() && "Should be a branching condition.");

  Cond[0].setImm(NewOpcode);

  BuildMI(MBB, MI, DL, get(Hexagon::A2_nop));

  while (NOp < NumOps) {

  unsigned PredReg, PredRegPos, PredRegFlags;
  bool GotPredReg = getPredReg(Cond, PredReg, PredRegPos, PredRegFlags);

  T.addReg(PredReg, PredRegFlags);
  while (NOp < NumOps)

                                         std::vector<MachineOperand> &Pred) const {

  if (RC == &Hexagon::PredRegsRegClass) {

  for (unsigned PR : Hexagon::PredRegsRegClass) {
  case Hexagon::V6_vL32b_ai:
  case Hexagon::V6_vL32b_pi:
  case Hexagon::V6_vL32b_ppu:
  case Hexagon::V6_vL32b_cur_ai:
  case Hexagon::V6_vL32b_cur_pi:
  case Hexagon::V6_vL32b_cur_ppu:
  case Hexagon::V6_vL32b_nt_ai:
  case Hexagon::V6_vL32b_nt_pi:
  case Hexagon::V6_vL32b_nt_ppu:
  case Hexagon::V6_vL32b_tmp_ai:
  case Hexagon::V6_vL32b_tmp_pi:
  case Hexagon::V6_vL32b_tmp_ppu:
  case Hexagon::V6_vL32b_nt_cur_ai:
  case Hexagon::V6_vL32b_nt_cur_pi:
  case Hexagon::V6_vL32b_nt_cur_ppu:
  case Hexagon::V6_vL32b_nt_tmp_ai:
  case Hexagon::V6_vL32b_nt_tmp_pi:
  case Hexagon::V6_vL32b_nt_tmp_ppu:
  bool atInsnStart = true;
  unsigned Length = 0;
  for (; *Str; ++Str) {
    if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      atInsnStart = false;

      atInsnStart = false;

  Length += AStr.count(Occ)*4;
                                      unsigned &SrcReg2, int &Mask,

  case Hexagon::C2_cmpeq:
  case Hexagon::C2_cmpeqp:
  case Hexagon::C2_cmpgt:
  case Hexagon::C2_cmpgtp:
  case Hexagon::C2_cmpgtu:
  case Hexagon::C2_cmpgtup:
  case Hexagon::C4_cmpneq:
  case Hexagon::C4_cmplte:
  case Hexagon::C4_cmplteu:
  case Hexagon::C2_cmpeqi:
  case Hexagon::C2_cmpgti:
  case Hexagon::C2_cmpgtui:
  case Hexagon::C4_cmpneqi:
  case Hexagon::C4_cmplteui:
  case Hexagon::C4_cmpltei:

  case Hexagon::A4_cmpbeq:
  case Hexagon::A4_cmpbgt:
  case Hexagon::A4_cmpbgtu:
  case Hexagon::A4_cmpbeqi:
  case Hexagon::A4_cmpbgti:
  case Hexagon::A4_cmpbgtui:

  case Hexagon::A4_cmpheq:
  case Hexagon::A4_cmphgt:
  case Hexagon::A4_cmphgtu:
  case Hexagon::A4_cmpheqi:
  case Hexagon::A4_cmphgti:
  case Hexagon::A4_cmphgtui:

  case Hexagon::C2_cmpeq:
  case Hexagon::C2_cmpeqp:
  case Hexagon::C2_cmpgt:
  case Hexagon::C2_cmpgtp:
  case Hexagon::C2_cmpgtu:
  case Hexagon::C2_cmpgtup:
  case Hexagon::A4_cmpbeq:
  case Hexagon::A4_cmpbgt:
  case Hexagon::A4_cmpbgtu:
  case Hexagon::A4_cmpheq:
  case Hexagon::A4_cmphgt:
  case Hexagon::A4_cmphgtu:
  case Hexagon::C4_cmpneq:
  case Hexagon::C4_cmplte:
  case Hexagon::C4_cmplteu:

  case Hexagon::C2_cmpeqi:
  case Hexagon::C2_cmpgtui:
  case Hexagon::C2_cmpgti:
  case Hexagon::C4_cmpneqi:
  case Hexagon::C4_cmplteui:
  case Hexagon::C4_cmpltei:
  case Hexagon::A4_cmpbeqi:
  case Hexagon::A4_cmpbgti:
  case Hexagon::A4_cmpbgtui:
  case Hexagon::A4_cmpheqi:
  case Hexagon::A4_cmphgti:
  case Hexagon::A4_cmphgtui: {
                                       unsigned *PredCost) const {

  unsigned BasePosA, OffsetPosA;

  unsigned BaseRegA = BaseA.getReg();

  unsigned BasePosB, OffsetPosB;

  unsigned BaseRegB = BaseB.getReg();

  if (BaseRegA != BaseRegB || BaseSubA != BaseSubB)

  if (OffsetA > OffsetB) {
    uint64_t OffDiff = (uint64_t)((int64_t)OffsetA - (int64_t)OffsetB);
    return SizeB <= OffDiff;
  }
  if (OffsetA < OffsetB) {
    uint64_t OffDiff = (uint64_t)((int64_t)OffsetB - (int64_t)OffsetA);
    return SizeA <= OffDiff;
  }

  unsigned BasePos = 0, OffsetPos = 0;

  if (OffsetOp.isImm()) {
    Value = OffsetOp.getImm();
  } else if (MI.getOpcode() == Hexagon::A2_addi) {
    if (AddOp.isImm()) {
std::pair<unsigned, unsigned>

  using namespace HexagonII;

  static const std::pair<unsigned, const char*> Flags[] = {

    {MO_IE, "hexagon-ie"},

  using namespace HexagonII;

  static const std::pair<unsigned, const char*> Flags[] = {

    TRC = &Hexagon::PredRegsRegClass;

    TRC = &Hexagon::IntRegsRegClass;

    TRC = &Hexagon::DoubleRegsRegClass;

  assert(MO.isImm() && "Extendable operand must be Immediate type");

  int ImmValue = MO.getImm();

  return (ImmValue < MinValue || ImmValue > MaxValue);
  case Hexagon::L4_return:
  case Hexagon::L4_return_t:
  case Hexagon::L4_return_f:
  case Hexagon::L4_return_tnew_pnt:
  case Hexagon::L4_return_fnew_pnt:
  case Hexagon::L4_return_tnew_pt:
  case Hexagon::L4_return_fnew_pt:

  for (auto &RegA : DefsA)
    for (auto &RegB : UsesB) {

      if (RegB == *SubRegs)

      if (RegA == *SubRegs)

  case Hexagon::V6_vL32b_cur_pi:
  case Hexagon::V6_vL32b_cur_ai:

  return (Opcode == Hexagon::ENDLOOP0 ||
          Opcode == Hexagon::ENDLOOP1);

  case Hexagon::PS_fi:
  case Hexagon::PS_fia:

  const uint64_t F = get(Opcode).TSFlags;

  case Hexagon::J2_callr:
  case Hexagon::J2_callrf:
  case Hexagon::J2_callrt:
  case Hexagon::PS_call_nr:

  case Hexagon::L4_return:
  case Hexagon::L4_return_t:
  case Hexagon::L4_return_f:
  case Hexagon::L4_return_fnew_pnt:
  case Hexagon::L4_return_fnew_pt:
  case Hexagon::L4_return_tnew_pnt:
  case Hexagon::L4_return_tnew_pt:

  case Hexagon::J2_jumpr:
  case Hexagon::J2_jumprt:
  case Hexagon::J2_jumprf:
  case Hexagon::J2_jumprtnewpt:
  case Hexagon::J2_jumprfnewpt:
  case Hexagon::J2_jumprtnew:
  case Hexagon::J2_jumprfnew:
                                      unsigned offset) const {

    return isInt<11>(offset);

  case Hexagon::J2_jump:
  case Hexagon::J2_call:
  case Hexagon::PS_call_nr:
    return isInt<24>(offset);
  case Hexagon::J2_jumpt:
  case Hexagon::J2_jumpf:
  case Hexagon::J2_jumptnew:
  case Hexagon::J2_jumptnewpt:
  case Hexagon::J2_jumpfnew:
  case Hexagon::J2_jumpfnewpt:
  case Hexagon::J2_callt:
  case Hexagon::J2_callf:
    return isInt<17>(offset);
  case Hexagon::J2_loop0i:
  case Hexagon::J2_loop0iext:
  case Hexagon::J2_loop0r:
  case Hexagon::J2_loop0rext:
  case Hexagon::J2_loop1i:
  case Hexagon::J2_loop1iext:
  case Hexagon::J2_loop1r:
  case Hexagon::J2_loop1rext:
    return isInt<9>(offset);

  case Hexagon::J4_cmpeqi_tp0_jump_nt:
  case Hexagon::J4_cmpeqi_tp1_jump_nt:
  case Hexagon::J4_cmpeqn1_tp0_jump_nt:
  case Hexagon::J4_cmpeqn1_tp1_jump_nt:
    return isInt<11>(offset);
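// Illustration only, not part of the original file: isInt<N>(x) from
// llvm/Support/MathExtras.h tests whether x fits a signed N-bit field, i.e.
// -(1 << (N-1)) <= x <= (1 << (N-1)) - 1. The unconditional jump/call cases above
// therefore accept a 24-bit signed displacement, while the compound
// compare-and-jump forms are limited to 11 bits.
static_assert(isInt<24>(8388607) && !isInt<24>(8388608),
              "24-bit signed displacements span [-2^23, 2^23 - 1]");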
  if (isLate && isEarly) {

  case TargetOpcode::EXTRACT_SUBREG:
  case TargetOpcode::INSERT_SUBREG:
  case TargetOpcode::SUBREG_TO_REG:
  case TargetOpcode::REG_SEQUENCE:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::COPY:

  case TargetOpcode::PHI:

  return !is_TC1(SchedClass);

  return Opcode == Hexagon::J2_loop0i ||
         Opcode == Hexagon::J2_loop0r ||
         Opcode == Hexagon::J2_loop0iext ||
         Opcode == Hexagon::J2_loop0rext ||
         Opcode == Hexagon::J2_loop1i ||
         Opcode == Hexagon::J2_loop1r ||
         Opcode == Hexagon::J2_loop1iext ||
         Opcode == Hexagon::J2_loop1rext;
  default:
    return false;
  case Hexagon::L4_iadd_memopw_io:
  case Hexagon::L4_isub_memopw_io:
  case Hexagon::L4_add_memopw_io:
  case Hexagon::L4_sub_memopw_io:
  case Hexagon::L4_and_memopw_io:
  case Hexagon::L4_or_memopw_io:
  case Hexagon::L4_iadd_memoph_io:
  case Hexagon::L4_isub_memoph_io:
  case Hexagon::L4_add_memoph_io:
  case Hexagon::L4_sub_memoph_io:
  case Hexagon::L4_and_memoph_io:
  case Hexagon::L4_or_memoph_io:
  case Hexagon::L4_iadd_memopb_io:
  case Hexagon::L4_isub_memopb_io:
  case Hexagon::L4_add_memopb_io:
  case Hexagon::L4_sub_memopb_io:
  case Hexagon::L4_and_memopb_io:
  case Hexagon::L4_or_memopb_io:
  case Hexagon::L4_ior_memopb_io:
  case Hexagon::L4_ior_memoph_io:
  case Hexagon::L4_ior_memopw_io:
  case Hexagon::L4_iand_memopb_io:
  case Hexagon::L4_iand_memoph_io:
  case Hexagon::L4_iand_memopw_io:
  const uint64_t F = get(Opcode).TSFlags;

  const uint64_t F = get(Opcode).TSFlags;

                                          unsigned OperandNum) const {

  const uint64_t F = get(Opcode).TSFlags;

  const uint64_t F = get(Opcode).TSFlags;

  const uint64_t F = get(Opcode).TSFlags;

  const uint64_t F = get(Opcode).TSFlags;

  const uint64_t F = get(Opcode).TSFlags;

  return MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4 ||
         MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_EXT ||
         MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_PIC ||
         MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_EXT_PIC;
  case Hexagon::L2_loadrb_io:
  case Hexagon::L4_loadrb_ur:
  case Hexagon::L4_loadrb_ap:
  case Hexagon::L2_loadrb_pr:
  case Hexagon::L2_loadrb_pbr:
  case Hexagon::L2_loadrb_pi:
  case Hexagon::L2_loadrb_pci:
  case Hexagon::L2_loadrb_pcr:
  case Hexagon::L2_loadbsw2_io:
  case Hexagon::L4_loadbsw2_ur:
  case Hexagon::L4_loadbsw2_ap:
  case Hexagon::L2_loadbsw2_pr:
  case Hexagon::L2_loadbsw2_pbr:
  case Hexagon::L2_loadbsw2_pi:
  case Hexagon::L2_loadbsw2_pci:
  case Hexagon::L2_loadbsw2_pcr:
  case Hexagon::L2_loadbsw4_io:
  case Hexagon::L4_loadbsw4_ur:
  case Hexagon::L4_loadbsw4_ap:
  case Hexagon::L2_loadbsw4_pr:
  case Hexagon::L2_loadbsw4_pbr:
  case Hexagon::L2_loadbsw4_pi:
  case Hexagon::L2_loadbsw4_pci:
  case Hexagon::L2_loadbsw4_pcr:
  case Hexagon::L4_loadrb_rr:
  case Hexagon::L2_ploadrbt_io:
  case Hexagon::L2_ploadrbt_pi:
  case Hexagon::L2_ploadrbf_io:
  case Hexagon::L2_ploadrbf_pi:
  case Hexagon::L2_ploadrbtnew_io:
  case Hexagon::L2_ploadrbfnew_io:
  case Hexagon::L4_ploadrbt_rr:
  case Hexagon::L4_ploadrbf_rr:
  case Hexagon::L4_ploadrbtnew_rr:
  case Hexagon::L4_ploadrbfnew_rr:
  case Hexagon::L2_ploadrbtnew_pi:
  case Hexagon::L2_ploadrbfnew_pi:
  case Hexagon::L4_ploadrbt_abs:
  case Hexagon::L4_ploadrbf_abs:
  case Hexagon::L4_ploadrbtnew_abs:
  case Hexagon::L4_ploadrbfnew_abs:
  case Hexagon::L2_loadrbgp:

  case Hexagon::L2_loadrh_io:
  case Hexagon::L4_loadrh_ur:
  case Hexagon::L4_loadrh_ap:
  case Hexagon::L2_loadrh_pr:
  case Hexagon::L2_loadrh_pbr:
  case Hexagon::L2_loadrh_pi:
  case Hexagon::L2_loadrh_pci:
  case Hexagon::L2_loadrh_pcr:
  case Hexagon::L4_loadrh_rr:
  case Hexagon::L2_ploadrht_io:
  case Hexagon::L2_ploadrht_pi:
  case Hexagon::L2_ploadrhf_io:
  case Hexagon::L2_ploadrhf_pi:
  case Hexagon::L2_ploadrhtnew_io:
  case Hexagon::L2_ploadrhfnew_io:
  case Hexagon::L4_ploadrht_rr:
  case Hexagon::L4_ploadrhf_rr:
  case Hexagon::L4_ploadrhtnew_rr:
  case Hexagon::L4_ploadrhfnew_rr:
  case Hexagon::L2_ploadrhtnew_pi:
  case Hexagon::L2_ploadrhfnew_pi:
  case Hexagon::L4_ploadrht_abs:
  case Hexagon::L4_ploadrhf_abs:
  case Hexagon::L4_ploadrhtnew_abs:
  case Hexagon::L4_ploadrhfnew_abs:
  case Hexagon::L2_loadrhgp:

  case Hexagon::STriw_pred:
  case Hexagon::LDriw_pred:
  if (Op.isGlobal() || Op.isSymbol())

  return is_TC1(SchedClass);

  return is_TC2(SchedClass);

  for (int I = 0; I < N; I++)

  if (MI2.getOpcode() == Hexagon::V6_vS32b_pi)

  const uint64_t V = getType(MI);

  if (Offset % Size != 0)
  int Count = Offset / Size;

  return isInt<4>(Count);

  return isInt<3>(Count);
  case Hexagon::PS_vstorerq_ai:
  case Hexagon::PS_vstorerw_ai:
  case Hexagon::PS_vstorerw_nt_ai:
  case Hexagon::PS_vloadrq_ai:
  case Hexagon::PS_vloadrw_ai:
  case Hexagon::PS_vloadrw_nt_ai:
  case Hexagon::V6_vL32b_ai:
  case Hexagon::V6_vS32b_ai:
  case Hexagon::V6_vL32b_nt_ai:
  case Hexagon::V6_vS32b_nt_ai:
  case Hexagon::V6_vL32Ub_ai:
  case Hexagon::V6_vS32Ub_ai: {
    unsigned VectorSize = TRI->getSpillSize(Hexagon::HvxVRRegClass);
    if (Offset & (VectorSize-1))
    return isInt<4>(Offset >> Log2_32(VectorSize));

  case Hexagon::J2_loop0i:
  case Hexagon::J2_loop1i:
    return isUInt<10>(Offset);

  case Hexagon::S4_storeirb_io:
  case Hexagon::S4_storeirbt_io:
  case Hexagon::S4_storeirbf_io:
    return isUInt<6>(Offset);

  case Hexagon::S4_storeirh_io:
  case Hexagon::S4_storeirht_io:
  case Hexagon::S4_storeirhf_io:
    return isShiftedUInt<6,1>(Offset);

  case Hexagon::S4_storeiri_io:
  case Hexagon::S4_storeirit_io:
  case Hexagon::S4_storeirif_io:
    return isShiftedUInt<6,2>(Offset);

  case Hexagon::L2_loadri_io:
  case Hexagon::S2_storeri_io:
    return (Offset >= Hexagon_MEMW_OFFSET_MIN) &&

  case Hexagon::L2_loadrd_io:
  case Hexagon::S2_storerd_io:
    return (Offset >= Hexagon_MEMD_OFFSET_MIN) &&

  case Hexagon::L2_loadrh_io:
  case Hexagon::L2_loadruh_io:
  case Hexagon::S2_storerh_io:
  case Hexagon::S2_storerf_io:
    return (Offset >= Hexagon_MEMH_OFFSET_MIN) &&

  case Hexagon::L2_loadrb_io:
  case Hexagon::L2_loadrub_io:
  case Hexagon::S2_storerb_io:
    return (Offset >= Hexagon_MEMB_OFFSET_MIN) &&

  case Hexagon::A2_addi:
    return (Offset >= Hexagon_ADDI_OFFSET_MIN) &&

  case Hexagon::L4_iadd_memopw_io:
  case Hexagon::L4_isub_memopw_io:
  case Hexagon::L4_add_memopw_io:
  case Hexagon::L4_sub_memopw_io:
  case Hexagon::L4_and_memopw_io:
  case Hexagon::L4_or_memopw_io:
    return (0 <= Offset && Offset <= 255);

  case Hexagon::L4_iadd_memoph_io:
  case Hexagon::L4_isub_memoph_io:
  case Hexagon::L4_add_memoph_io:
  case Hexagon::L4_sub_memoph_io:
  case Hexagon::L4_and_memoph_io:
  case Hexagon::L4_or_memoph_io:
    return (0 <= Offset && Offset <= 127);

  case Hexagon::L4_iadd_memopb_io:
  case Hexagon::L4_isub_memopb_io:
  case Hexagon::L4_add_memopb_io:
  case Hexagon::L4_sub_memopb_io:
  case Hexagon::L4_and_memopb_io:
  case Hexagon::L4_or_memopb_io:
    return (0 <= Offset && Offset <= 63);

  case Hexagon::STriw_pred:
  case Hexagon::LDriw_pred:
  case Hexagon::STriw_ctr:
  case Hexagon::LDriw_ctr:

  case Hexagon::PS_fi:
  case Hexagon::PS_fia:

  case Hexagon::L2_ploadrbt_io:
  case Hexagon::L2_ploadrbf_io:
  case Hexagon::L2_ploadrubt_io:
  case Hexagon::L2_ploadrubf_io:
  case Hexagon::S2_pstorerbt_io:
  case Hexagon::S2_pstorerbf_io:
    return isUInt<6>(Offset);

  case Hexagon::L2_ploadrht_io:
  case Hexagon::L2_ploadrhf_io:
  case Hexagon::L2_ploadruht_io:
  case Hexagon::L2_ploadruhf_io:
  case Hexagon::S2_pstorerht_io:
  case Hexagon::S2_pstorerhf_io:
    return isShiftedUInt<6,1>(Offset);

  case Hexagon::L2_ploadrit_io:
  case Hexagon::L2_ploadrif_io:
  case Hexagon::S2_pstorerit_io:
  case Hexagon::S2_pstorerif_io:
    return isShiftedUInt<6,2>(Offset);

  case Hexagon::L2_ploadrdt_io:
  case Hexagon::L2_ploadrdf_io:
  case Hexagon::S2_pstorerdt_io:
  case Hexagon::S2_pstorerdf_io:
    return isShiftedUInt<6,3>(Offset);

         "Please define it in the above switch statement!");

  const uint64_t F = get(MI.getOpcode()).TSFlags;
  case Hexagon::L2_loadrub_io:
  case Hexagon::L4_loadrub_ur:
  case Hexagon::L4_loadrub_ap:
  case Hexagon::L2_loadrub_pr:
  case Hexagon::L2_loadrub_pbr:
  case Hexagon::L2_loadrub_pi:
  case Hexagon::L2_loadrub_pci:
  case Hexagon::L2_loadrub_pcr:
  case Hexagon::L2_loadbzw2_io:
  case Hexagon::L4_loadbzw2_ur:
  case Hexagon::L4_loadbzw2_ap:
  case Hexagon::L2_loadbzw2_pr:
  case Hexagon::L2_loadbzw2_pbr:
  case Hexagon::L2_loadbzw2_pi:
  case Hexagon::L2_loadbzw2_pci:
  case Hexagon::L2_loadbzw2_pcr:
  case Hexagon::L2_loadbzw4_io:
  case Hexagon::L4_loadbzw4_ur:
  case Hexagon::L4_loadbzw4_ap:
  case Hexagon::L2_loadbzw4_pr:
  case Hexagon::L2_loadbzw4_pbr:
  case Hexagon::L2_loadbzw4_pi:
  case Hexagon::L2_loadbzw4_pci:
  case Hexagon::L2_loadbzw4_pcr:
  case Hexagon::L4_loadrub_rr:
  case Hexagon::L2_ploadrubt_io:
  case Hexagon::L2_ploadrubt_pi:
  case Hexagon::L2_ploadrubf_io:
  case Hexagon::L2_ploadrubf_pi:
  case Hexagon::L2_ploadrubtnew_io:
  case Hexagon::L2_ploadrubfnew_io:
  case Hexagon::L4_ploadrubt_rr:
  case Hexagon::L4_ploadrubf_rr:
  case Hexagon::L4_ploadrubtnew_rr:
  case Hexagon::L4_ploadrubfnew_rr:
  case Hexagon::L2_ploadrubtnew_pi:
  case Hexagon::L2_ploadrubfnew_pi:
  case Hexagon::L4_ploadrubt_abs:
  case Hexagon::L4_ploadrubf_abs:
  case Hexagon::L4_ploadrubtnew_abs:
  case Hexagon::L4_ploadrubfnew_abs:
  case Hexagon::L2_loadrubgp:

  case Hexagon::L2_loadruh_io:
  case Hexagon::L4_loadruh_ur:
  case Hexagon::L4_loadruh_ap:
  case Hexagon::L2_loadruh_pr:
  case Hexagon::L2_loadruh_pbr:
  case Hexagon::L2_loadruh_pi:
  case Hexagon::L2_loadruh_pci:
  case Hexagon::L2_loadruh_pcr:
  case Hexagon::L4_loadruh_rr:
  case Hexagon::L2_ploadruht_io:
  case Hexagon::L2_ploadruht_pi:
  case Hexagon::L2_ploadruhf_io:
  case Hexagon::L2_ploadruhf_pi:
  case Hexagon::L2_ploadruhtnew_io:
  case Hexagon::L2_ploadruhfnew_io:
  case Hexagon::L4_ploadruht_rr:
  case Hexagon::L4_ploadruhf_rr:
  case Hexagon::L4_ploadruhtnew_rr:
  case Hexagon::L4_ploadruhfnew_rr:
  case Hexagon::L2_ploadruhtnew_pi:
  case Hexagon::L2_ploadruhfnew_pi:
  case Hexagon::L4_ploadruht_abs:
  case Hexagon::L4_ploadruhf_abs:
  case Hexagon::L4_ploadruhtnew_abs:
  case Hexagon::L4_ploadruhfnew_abs:
  case Hexagon::L2_loadruhgp:
  unsigned AccessSize = 0;

         "getMemOperandWithOffset only supports base operands of type register.");
  return BaseOp != nullptr;

  if (!Stored.isReg())

  return Opc == Hexagon::PS_call_nr || Opc == Hexagon::PS_callr_nr;

  if (Hexagon::getRegForm(MI.getOpcode()) >= 0)

    NonExtOpcode = Hexagon::changeAddrMode_abs_io(MI.getOpcode());

    NonExtOpcode = Hexagon::changeAddrMode_io_rr(MI.getOpcode());

    NonExtOpcode = Hexagon::changeAddrMode_ur_rr(MI.getOpcode());

  if (NonExtOpcode < 0)

  return Hexagon::getRealHWInstr(MI.getOpcode(),
                                 Hexagon::InstrType_Pseudo) >= 0;

  Subtarget.hasV60Ops();

  if (MI.mayStore() && !Subtarget.useNewValueStores())

  if (!MII->isBundle())

  for (++MII; MII != MIE && MII->isInsideBundle(); ++MII) {

                                             unsigned PredReg) const {

    if (MO.isRegMask() && MO.clobbersPhysReg(PredReg))

    if (MO.isReg() && MO.isDef() && MO.isImplicit() && (MO.getReg() == PredReg))
  case Hexagon::A4_addp_c:
  case Hexagon::A4_subp_c:
  case Hexagon::A4_tlbmatch:
  case Hexagon::A5_ACS:
  case Hexagon::F2_sfinvsqrta:
  case Hexagon::F2_sfrecipa:
  case Hexagon::J2_endloop0:
  case Hexagon::J2_endloop01:
  case Hexagon::J2_ploop1si:
  case Hexagon::J2_ploop1sr:
  case Hexagon::J2_ploop2si:
  case Hexagon::J2_ploop2sr:
  case Hexagon::J2_ploop3si:
  case Hexagon::J2_ploop3sr:
  case Hexagon::S2_cabacdecbin:
  case Hexagon::S2_storew_locked:
  case Hexagon::S4_stored_locked:

  return Opcode == Hexagon::J2_jumpt ||
         Opcode == Hexagon::J2_jumptpt ||
         Opcode == Hexagon::J2_jumpf ||
         Opcode == Hexagon::J2_jumpfpt ||
         Opcode == Hexagon::J2_jumptnew ||
         Opcode == Hexagon::J2_jumpfnew ||
         Opcode == Hexagon::J2_jumptnewpt ||
         Opcode == Hexagon::J2_jumpfnewpt;
                                      unsigned &AccessSize) const {

  unsigned BasePos = 0, OffsetPos = 0;

  if (!OffsetOp.isImm())

  Offset = OffsetOp.getImm();

                                      unsigned &BasePos, unsigned &OffsetPos) const {

  while (I->isDebugInstr()) {

  if (!isUnpredicatedTerminator(*I))

  if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(*I)) {
    if (!SecondLastInst) {
      SecondLastInst = &*I;
  unsigned DstReg, SrcReg, Src1Reg, Src2Reg;

  case Hexagon::C2_cmpeq:
  case Hexagon::C2_cmpgt:
  case Hexagon::C2_cmpgtu:

    if (Hexagon::PredRegsRegClass.contains(DstReg) &&
        (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&

  case Hexagon::C2_cmpeqi:
  case Hexagon::C2_cmpgti:
  case Hexagon::C2_cmpgtui:

    if (Hexagon::PredRegsRegClass.contains(DstReg) &&
        (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&

  case Hexagon::A2_tfr:

  case Hexagon::A2_tfrsi:

  case Hexagon::S2_tstbit_i:

    if (Hexagon::PredRegsRegClass.contains(DstReg) &&
        (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&

  case Hexagon::J2_jumptnew:
  case Hexagon::J2_jumpfnew:
  case Hexagon::J2_jumptnewpt:
  case Hexagon::J2_jumpfnewpt:

    if (Hexagon::PredRegsRegClass.contains(Src1Reg) &&
        (Hexagon::P0 == Src1Reg || Hexagon::P1 == Src1Reg))

  case Hexagon::J2_jump:
  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:

  if ((GA.getOpcode() != Hexagon::C2_cmpeqi) ||
      (GB.getOpcode() != Hexagon::J2_jumptnew))

  if (DestReg != Hexagon::P0 && DestReg != Hexagon::P1)

    return DestReg == Hexagon::P0 ? Hexagon::J4_cmpeqn1_tp0_jump_nt
                                  : Hexagon::J4_cmpeqn1_tp1_jump_nt;

  return DestReg == Hexagon::P0 ? Hexagon::J4_cmpeqi_tp0_jump_nt
                                : Hexagon::J4_cmpeqi_tp1_jump_nt;

  enum Hexagon::PredSense inPredSense;
  inPredSense = invertPredicate ? Hexagon::PredSense_false :
                                  Hexagon::PredSense_true;
  int CondOpcode = Hexagon::getPredOpcode(Opc, inPredSense);
  if (CondOpcode >= 0)
  case Hexagon::V6_vL32b_pi:
    return Hexagon::V6_vL32b_cur_pi;
  case Hexagon::V6_vL32b_ai:
    return Hexagon::V6_vL32b_cur_ai;
  case Hexagon::V6_vL32b_nt_pi:
    return Hexagon::V6_vL32b_nt_cur_pi;
  case Hexagon::V6_vL32b_nt_ai:
    return Hexagon::V6_vL32b_nt_cur_ai;

  case Hexagon::V6_vL32b_cur_pi:
    return Hexagon::V6_vL32b_pi;
  case Hexagon::V6_vL32b_cur_ai:
    return Hexagon::V6_vL32b_ai;
  case Hexagon::V6_vL32b_nt_cur_pi:
    return Hexagon::V6_vL32b_nt_pi;
  case Hexagon::V6_vL32b_nt_cur_ai:
    return Hexagon::V6_vL32b_nt_ai;
  int NVOpcode = Hexagon::getNewValueOpcode(MI.getOpcode());

  case Hexagon::S4_storerb_ur:
    return Hexagon::S4_storerbnew_ur;

  case Hexagon::S2_storerb_pci:
    return Hexagon::S2_storerb_pci;

  case Hexagon::S2_storeri_pci:
    return Hexagon::S2_storeri_pci;

  case Hexagon::S2_storerh_pci:
    return Hexagon::S2_storerh_pci;

  case Hexagon::S2_storerd_pci:
    return Hexagon::S2_storerd_pci;

  case Hexagon::S2_storerf_pci:
    return Hexagon::S2_storerf_pci;

  case Hexagon::V6_vS32b_ai:
    return Hexagon::V6_vS32b_new_ai;

  case Hexagon::V6_vS32b_pi:
    return Hexagon::V6_vS32b_new_pi;
  if (BrTarget.isMBB()) {

    Taken = getEdgeProbability(Src, Dst) >= OneHalf;

  bool SawCond = false, Bad = false;

    if (I.isConditionalBranch()) {

    if (I.isUnconditionalBranch() && !SawCond) {

  if (NextIt == B.instr_end()) {

    if (!B.isLayoutSuccessor(SB))

    Taken = getEdgeProbability(Src, SB) < OneHalf;

  assert(NextIt->isUnconditionalBranch());

  Taken = BT && getEdgeProbability(Src, BT) < OneHalf;

  case Hexagon::J2_jumpt:
    return Taken ? Hexagon::J2_jumptnewpt : Hexagon::J2_jumptnew;
  case Hexagon::J2_jumpf:
    return Taken ? Hexagon::J2_jumpfnewpt : Hexagon::J2_jumpfnew;
  case Hexagon::J2_jumpt:
  case Hexagon::J2_jumpf:

  int NewOpcode = Hexagon::getPredNewOpcode(MI.getOpcode());

  NewOp = Hexagon::getPredOldOpcode(NewOp);

  if (!Subtarget.getFeatureBits()[Hexagon::ArchV60]) {
    case Hexagon::J2_jumptpt:
      NewOp = Hexagon::J2_jumpt;
    case Hexagon::J2_jumpfpt:
      NewOp = Hexagon::J2_jumpf;
    case Hexagon::J2_jumprtpt:
      NewOp = Hexagon::J2_jumprt;
    case Hexagon::J2_jumprfpt:
      NewOp = Hexagon::J2_jumprf;

         "Couldn't change predicate new instruction to its old form.");

  NewOp = Hexagon::getNonNVStore(NewOp);
  assert(NewOp >= 0 && "Couldn't change new-value store to its old form.");

  if (Subtarget.hasV60Ops())

  case Hexagon::J2_jumpfpt:
    return Hexagon::J2_jumpf;
  case Hexagon::J2_jumptpt:
    return Hexagon::J2_jumpt;
  case Hexagon::J2_jumprfpt:
    return Hexagon::J2_jumprf;
  case Hexagon::J2_jumprtpt:
    return Hexagon::J2_jumprt;
  unsigned DstReg, SrcReg, Src1Reg, Src2Reg;

  case Hexagon::L2_loadri_io:

    if (Hexagon::IntRegsRegClass.contains(SrcReg) &&

  case Hexagon::L2_loadrub_io:

  case Hexagon::L2_loadrh_io:
  case Hexagon::L2_loadruh_io:

  case Hexagon::L2_loadrb_io:

  case Hexagon::L2_loadrd_io:

        Hexagon::IntRegsRegClass.contains(SrcReg) &&

  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
  case Hexagon::L4_return:
  case Hexagon::L2_deallocframe:

  case Hexagon::EH_RETURN_JMPR:
  case Hexagon::PS_jmpret:
  case Hexagon::SL2_jumpr31:

    if (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg))

  case Hexagon::PS_jmprett:
  case Hexagon::PS_jmpretf:
  case Hexagon::PS_jmprettnewpt:
  case Hexagon::PS_jmpretfnewpt:
  case Hexagon::PS_jmprettnew:
  case Hexagon::PS_jmpretfnew:
  case Hexagon::SL2_jumpr31_t:
  case Hexagon::SL2_jumpr31_f:
  case Hexagon::SL2_jumpr31_tnew:

    if ((Hexagon::PredRegsRegClass.contains(SrcReg) &&
         (Hexagon::P0 == SrcReg)) &&
        (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg)))

  case Hexagon::L4_return_t:
  case Hexagon::L4_return_f:
  case Hexagon::L4_return_tnew_pnt:
  case Hexagon::L4_return_fnew_pnt:
  case Hexagon::L4_return_tnew_pt:
  case Hexagon::L4_return_fnew_pt:

    if (Hexagon::PredRegsRegClass.contains(SrcReg) && (Hexagon::P0 == SrcReg))

  case Hexagon::S2_storeri_io:

    if (Hexagon::IntRegsRegClass.contains(Src1Reg) &&

  case Hexagon::S2_storerb_io:

  case Hexagon::S2_storerh_io:

  case Hexagon::S2_storerd_io:

        Hexagon::IntRegsRegClass.contains(Src1Reg) &&

  case Hexagon::S4_storeiri_io:

  case Hexagon::S4_storeirb_io:

  case Hexagon::S2_allocframe:

  case Hexagon::A2_addi:

    if (Hexagon::IntRegsRegClass.contains(SrcReg) &&

  case Hexagon::A2_add:

  case Hexagon::A2_andir:

  case Hexagon::A2_tfr:

  case Hexagon::A2_tfrsi:

  case Hexagon::C2_cmoveit:
  case Hexagon::C2_cmovenewit:
  case Hexagon::C2_cmoveif:
  case Hexagon::C2_cmovenewif:

        Hexagon::PredRegsRegClass.contains(SrcReg) && Hexagon::P0 == SrcReg &&

  case Hexagon::C2_cmpeqi:

    if (Hexagon::PredRegsRegClass.contains(DstReg) &&

  case Hexagon::A2_combineii:
  case Hexagon::A4_combineii:

  case Hexagon::A4_combineri:

  case Hexagon::A4_combineir:

  case Hexagon::A2_sxtb:
  case Hexagon::A2_sxth:
  case Hexagon::A2_zxtb:
  case Hexagon::A2_zxth:
  return Hexagon::getRealHWInstr(MI.getOpcode(), Hexagon::InstrType_Real);

                                           unsigned UseIdx) const {

  if (DefMO.isReg() && HRI.isPhysicalRegister(DefMO.getReg())) {

  Cond[0].setImm(Opc);

                          : Hexagon::getTruePredOpcode(Opc);
  if (InvPredOpcode >= 0)
    return InvPredOpcode;

    return ~(-1U << (bits - 1));
  return ~(-1U << bits);
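// Worked example, not part of the original file: the two expressions above compute
// the largest value representable in a signed or unsigned immediate field of width
// `bits`. For an 8-bit field:
//   signed   max = ~(-1U << (8 - 1)) = 127
//   unsigned max = ~(-1U << 8)       = 255
static_assert(~(-1U << 7) == 127u && ~(-1U << 8) == 255u,
              "max values of 8-bit signed/unsigned immediate fields");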
  case Hexagon::L2_loadrbgp:
  case Hexagon::L2_loadrdgp:
  case Hexagon::L2_loadrhgp:
  case Hexagon::L2_loadrigp:
  case Hexagon::L2_loadrubgp:
  case Hexagon::L2_loadruhgp:
  case Hexagon::S2_storerbgp:
  case Hexagon::S2_storerbnewgp:
  case Hexagon::S2_storerhgp:
  case Hexagon::S2_storerhnewgp:
  case Hexagon::S2_storerigp:
  case Hexagon::S2_storerinewgp:
  case Hexagon::S2_storerdgp:
  case Hexagon::S2_storerfgp:
  using namespace HexagonII;

  return HRI.getSpillSize(Hexagon::HvxVRRegClass);

  return -1U << (bits - 1);

  short NonExtOpcode = Hexagon::getRegForm(MI.getOpcode());
  if (NonExtOpcode >= 0)
    return NonExtOpcode;

    return Hexagon::changeAddrMode_abs_io(MI.getOpcode());

    return Hexagon::changeAddrMode_io_rr(MI.getOpcode());

    return Hexagon::changeAddrMode_ur_rr(MI.getOpcode());
                                   unsigned &PredReg, unsigned &PredRegPos,
                                   unsigned &PredRegFlags) const {

  PredReg = Cond[1].getReg();

  if (Cond[1].isImplicit())

  return Hexagon::getRealHWInstr(MI.getOpcode(), Hexagon::InstrType_Pseudo);

  return Hexagon::getRegForm(MI.getOpcode());

  unsigned NumDefs = 0;

  assert(BundleHead->isBundle() && "Not a bundle header");

         "Instruction must be extendable");

         "Branch with unknown extendable field type");

  for (unsigned insn = TargetOpcode::GENERIC_OP_END+1;
       insn < Hexagon::INSTRUCTION_LIST_END; ++insn) {
    NewMI = BuildMI(B, I, DL, get(insn));

  int PredRevOpcode = -1;

    PredRevOpcode = Hexagon::notTakenBranchPrediction(Opcode);

    PredRevOpcode = Hexagon::takenBranchPrediction(Opcode);
  assert(PredRevOpcode > 0);
  return PredRevOpcode;

  return Cond.empty() || (Cond[0].isImm() && (Cond.size() != 1));

  if (Operand.isImm())
    Operand.setImm(Operand.getImm() | memShufDisabledMask);

  return (Operand.isImm() && (Operand.getImm() & memShufDisabledMask) != 0);

  return Opc >= 0 ? Hexagon::changeAddrMode_abs_io(Opc) : Opc;

  return Opc >= 0 ? Hexagon::changeAddrMode_io_abs(Opc) : Opc;

  return Opc >= 0 ? Hexagon::changeAddrMode_io_pi(Opc) : Opc;

  return Opc >= 0 ? Hexagon::changeAddrMode_io_rr(Opc) : Opc;

  return Opc >= 0 ? Hexagon::changeAddrMode_pi_io(Opc) : Opc;

  return Opc >= 0 ? Hexagon::changeAddrMode_rr_io(Opc) : Opc;

  return Opc >= 0 ? Hexagon::changeAddrMode_rr_ur(Opc) : Opc;

  return Opc >= 0 ? Hexagon::changeAddrMode_ur_rr(Opc) : Opc;
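// Illustrative sketch only, not part of the original file. The changeAddrMode_*
// wrappers above pass negative opcodes through unchanged, and the underlying
// instruction maps return a negative value when no counterpart exists, so callers
// always guard the result before using it:
static bool hasRegRegAddrForm(const HexagonInstrInfo &HII,
                              const MachineInstr &MI) {
  // Query whether a base+register form exists for this base+immediate opcode.
  short NewOpc = HII.changeAddrMode_io_rr(MI.getOpcode());
  return NewOpc >= 0; // Negative means the instruction map has no rr variant.
}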
static bool isReg(const MCInst &MI, unsigned OpNo)
unsigned getTargetFlags() const
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
short getNonExtOpcode(const MachineInstr &MI) const
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
Return an array that contains the direct target flag values and their names.
bool isVecALU(const MachineInstr &MI) const
const MachineInstrBuilder & add(const MachineOperand &MO) const
bool DefinesPredicate(MachineInstr &MI, std::vector< MachineOperand > &Pred) const override
If the specified instruction defines any predicate or condition code register(s) used for predication...
This class is the base class for the comparison instructions.
short changeAddrMode_rr_io(short Opc) const
bool modifiesRegister(unsigned Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register...
instr_iterator instr_begin()
const int Hexagon_MEMH_OFFSET_MAX
bool is_TC2early(unsigned SchedClass)
bool isCall(QueryType Type=AnyInBundle) const
unsigned nonDbgBBSize(const MachineBasicBlock *BB) const
getInstrTimingClassLatency - Compute the instruction latency of a given instruction using Timing Clas...
instr_iterator instr_end()
MachineBasicBlock * getMBB() const
const int Hexagon_ADDI_OFFSET_MAX
unsigned getFrameRegister(const MachineFunction &MF) const override
unsigned getRegState(const MachineOperand &RegOp)
Get all register state flags from machine operand RegOp.
const int Hexagon_MEMH_OFFSET_MIN
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
This class represents lattice values for constants.
const InstrStage * beginStage(unsigned ItinClassIndx) const
Return the first stage of the itinerary.
static void parseOperands(const MachineInstr &MI, SmallVector< unsigned, 4 > &Defs, SmallVector< unsigned, 8 > &Uses)
Gather register def/uses from MI.
static cl::opt< bool > UseDFAHazardRec("dfa-hazard-rec", cl::init(true), cl::Hidden, cl::ZeroOrMore, cl::desc("Use the DFA based hazard recognizer."))
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Store the specified register of the given register class to the specified stack frame index...
void setBundleNoShuf(MachineBasicBlock::instr_iterator MIB) const
short getEquivalentHWInstr(const MachineInstr &MI) const
DFAPacketizer * CreateTargetScheduleState(const TargetSubtargetInfo &STI) const override
Create machine specific model for scheduling.
void push_back(const T &Elt)
bool isAbsoluteSet(const MachineInstr &MI) const
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
bool isJumpR(const MachineInstr &MI) const
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
bool SubsumesPredicate(ArrayRef< MachineOperand > Pred1, ArrayRef< MachineOperand > Pred2) const override
Returns true if the first specified predicate subsumes the second, e.g.
Describe properties that are true of each instruction in the target description file.
unsigned getUnits() const
Returns the choice of FUs.
bool isConstExtended(const MachineInstr &MI) const
unsigned getReg() const
getReg - Returns the register number.
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
bool getInvertedPredSense(SmallVectorImpl< MachineOperand > &Cond) const
Address of indexed Jump Table for switch.
unsigned nonDbgBundleSize(MachineBasicBlock::const_iterator BundleHead) const
unsigned getSubReg() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
Reverses the branch condition of the specified condition list, returning false on success and true if...
int getMaxValue(const MachineInstr &MI) const
ArrayRef< std::pair< unsigned, const char * > > getSerializableBitmaskMachineOperandTargetFlags() const override
Return an array that contains the bitmask target flag values and their names.
const int Hexagon_ADDI_OFFSET_MIN
bool reversePredSense(MachineInstr &MI) const
int getDotNewPredOp(const MachineInstr &MI, const MachineBranchProbabilityInfo *MBPI) const
bool isTransient() const
Return true if this is a transient instruction that is either very likely to be eliminated during reg...
MachineBasicBlock reference.
#define HEXAGON_INSTR_SIZE
bool isExpr(unsigned OpType) const
bool isTailCall(const MachineInstr &MI) const override
unsigned const TargetRegisterInfo * TRI
unsigned reduceLoopCount(MachineBasicBlock &MBB, MachineInstr *IndVar, MachineInstr &Cmp, SmallVectorImpl< MachineOperand > &Cond, SmallVectorImpl< MachineInstr *> &PrevInsts, unsigned Iter, unsigned MaxIter) const override
Generate code to reduce the loop iteration by one and check if the loop is finished.
bool isCPI() const
isCPI - Tests if this is a MO_ConstantPoolIndex operand.
bool producesStall(const MachineInstr &ProdMI, const MachineInstr &ConsMI) const
MachineInstrBundleIterator< const MachineInstr > const_iterator
const HexagonFrameLowering * getFrameLowering() const override
unsigned getMemAccessSize(const MachineInstr &MI) const
const int Hexagon_MEMD_OFFSET_MAX
unsigned getSize(const MachineInstr &MI) const
int getDotCurOp(const MachineInstr &MI) const
bool mayLoad() const
Return true if this instruction could possibly read memory.
bool isLateResultInstr(const MachineInstr &MI) const
iterator_range< mop_iterator > operands()
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
bool is_TC1(unsigned SchedClass)
void immediateExtend(MachineInstr &MI) const
immediateExtend - Changes the instruction in place to one using an immediate extender.
unsigned getStackRegister() const
int getDotNewPredJumpOp(const MachineInstr &MI, const MachineBranchProbabilityInfo *MBPI) const
short changeAddrMode_ur_rr(short Opc) const
bool hasLoadFromStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand *> &Accesses) const override
Check if the instruction or the bundle of instructions has load from stack slots. ...
unsigned getSpillSize(const TargetRegisterClass &RC) const
Return the size in bytes of the stack slot allocated to hold a spilled copy of a register from class ...
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
return AArch64::GPR64RegClass contains(Reg)
iterator_range< succ_iterator > successors()
bool isToBeScheduledASAP(const MachineInstr &MI1, const MachineInstr &MI2) const
void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const override
Insert a noop into the instruction stream at the specified point.
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
Analyze the branching code at the end of MBB, returning true if it cannot be understood (e...
bool isHVXVec(const MachineInstr &MI) const
static cl::opt< bool > BranchRelaxAsmLarge("branch-relax-asm-large", cl::init(true), cl::Hidden, cl::ZeroOrMore, cl::desc("branch relax asm"))
bool isComplex(const MachineInstr &MI) const
unsigned getSpillAlignment(const TargetRegisterClass &RC) const
Return the minimum required alignment in bytes for a spill slot for a register of this class...
static cl::opt< bool > DisableNVSchedule("disable-hexagon-nv-schedule", cl::Hidden, cl::ZeroOrMore, cl::init(false), cl::desc("Disable schedule adjustment for new value stores."))
A description of a memory reference used in the backend.
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Load the specified register of the given register class from the specified stack frame index...
unsigned getInvertedPredicatedOpcode(const int Opc) const
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
MCSuperRegIterator enumerates all super-registers of Reg.
ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, const ScheduleDAG *DAG) const override
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
unsigned getNumOperands() const
Retuns the total number of operands.
Printable printReg(unsigned Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
LLVM_NODISCARD size_t count(char C) const
Return the number of occurrences of C in the string.
unsigned getCompoundOpcode(const MachineInstr &GA, const MachineInstr &GB) const
bool isPredicatedNew(const MachineInstr &MI) const
bool canExecuteInBundle(const MachineInstr &First, const MachineInstr &Second) const
Can these instructions execute at the same time in a bundle.
bool predOpcodeHasNot(ArrayRef< MachineOperand > Cond) const
const HexagonRegisterInfo * getRegisterInfo() const override
MachineBasicBlock * getBottomBlock()
Return the "bottom" block in the loop, which is the last block in the linear layout, ignoring any parts of the loop not contiguous with the part that contains the header.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
bool isVecUsableNextPacket(const MachineInstr &ProdMI, const MachineInstr &ConsMI) const
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
Name of external global symbol.
static StringRef getName(Value *V)
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
bool isDotNewInst(const MachineInstr &MI) const
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isDeallocRet(const MachineInstr &MI) const
bool isExtended(const MachineInstr &MI) const
bool hasStoreToStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand *> &Accesses) const override
Check if the instruction or the bundle of instructions has a store to a stack slot.
const char * getSymbolName() const
bool is_TC2(unsigned SchedClass)
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
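As a hedged sketch of how such a memory operand is typically built for a spill slot (the helper name makeSpillStoreMMO is hypothetical; the pattern combines getMachineMemOperand with MachinePointerInfo::getFixedStack, both listed on this page):
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
using namespace llvm;
// Hypothetical helper: describe a store to fixed stack slot FI.
static MachineMemOperand *makeSpillStoreMMO(MachineFunction &MF, int FI) {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  return MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), // the slot being accessed
      MachineMemOperand::MOStore,                // the access writes data
      MFI.getObjectSize(FI),                     // size of the slot in bytes
      MFI.getObjectAlignment(FI));               // alignment of the slot
}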
INLINEASM - Represents an inline asm block.
bool isSolo(const MachineInstr &MI) const
bool isLateInstrFeedsEarlyInstr(const MachineInstr &LRMI, const MachineInstr &ESMI) const
MachineBasicBlock::instr_iterator expandVGatherPseudo(MachineInstr &MI) const
bool expandPostRAPseudo(MachineInstr &MI) const override
This function is called for all pseudo instructions that remain after register allocation.
bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Cond) const override
Convert the instruction into a predicated instruction.
bool predCanBeUsedAsDotNew(const MachineInstr &MI, unsigned PredReg) const
MachineInstr * findLoopInstr(MachineBasicBlock *BB, unsigned EndLoopOp, MachineBasicBlock *TargetBB, SmallPtrSet< MachineBasicBlock *, 8 > &Visited) const
Find the hardware loop instruction used to set up the specified loop.
SmallVector< MachineInstr *, 2 > getBranchingInstrs(MachineBasicBlock &MBB) const
bool doesNotReturn(const MachineInstr &CallMI) const
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
bool isIndirectBranch(QueryType Type=AnyInBundle) const
Return true if this is an indirect branch, such as a branch through a register.
bool isEndLoopN(unsigned Opcode) const
bool isCompoundBranchInstr(const MachineInstr &MI) const
void clearKillFlags(unsigned Reg) const
clearKillFlags - Iterate over all the uses of the given register and clear the kill flag from the Mac...
MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the sy...
short changeAddrMode_io_rr(short Opc) const
bool isPredictedTaken(unsigned Opcode) const
instr_iterator getInstrIterator() const
int getMinValue(const MachineInstr &MI) const
bool getBundleNoShuf(const MachineInstr &MIB) const
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
bool isFPImm() const
isFPImm - Tests if this is a MO_FPImmediate operand.
Itinerary data supplied by a subtarget to be used by a target.
short changeAddrMode_io_pi(short Opc) const
bool isTC1(const MachineInstr &MI) const
unsigned getUndefRegState(bool B)
unsigned getInstrLatency(const InstrItineraryData *ItinData, const MachineInstr &MI, unsigned *PredCost=nullptr) const override
Compute the instruction latency of a given instruction.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
unsigned getStageLatency(unsigned ItinClassIndx) const
Return the total stage latency of the given class.
short changeAddrMode_rr_ur(short Opc) const
reverse_iterator rbegin()
bool isBranch(QueryType Type=AnyInBundle) const
Returns true if this is a conditional, unconditional, or indirect branch.
bool getPredReg(ArrayRef< MachineOperand > Cond, unsigned &PredReg, unsigned &PredRegPos, unsigned &PredRegFlags) const
BasicBlockListType::iterator iterator
unsigned getKillRegState(bool B)
virtual const InstrItineraryData * getInstrItineraryData() const
getInstrItineraryData - Returns instruction itinerary data for the target or specific subtarget...
unsigned getCExtOpNum(const MachineInstr &MI) const
const int Hexagon_MEMD_OFFSET_MIN
MachineOperand * getBaseAndOffset(const MachineInstr &MI, int64_t &Offset, unsigned &AccessSize) const
bool isReturn(QueryType Type=AnyInBundle) const
bool is_TC3x(unsigned SchedClass)
This class is intended to be used as a base class for asm properties and features specific to the tar...
static cl::opt< bool > EnableBranchPrediction("hexagon-enable-branch-prediction", cl::Hidden, cl::init(true), cl::desc("Enable branch prediction"))
bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, BranchProbability Probability) const override
Return true if it's profitable for if-converter to duplicate instructions of specified accumulated in...
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
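A minimal sketch of the builder pattern for appending one instruction, assuming the Hexagon backend's generated opcode definitions are in scope for Hexagon::J2_jump (an opcode that appears elsewhere on this page); the helper name emitJump is hypothetical.
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/DebugLoc.h"
using namespace llvm;
// Hypothetical helper: append an unconditional jump to Target at the end of MBB.
static void emitJump(MachineBasicBlock &MBB, MachineBasicBlock *Target,
                     const TargetInstrInfo &TII, const DebugLoc &DL) {
  // BuildMI creates the instruction and returns a builder; addMBB() adds the
  // branch-target operand.
  BuildMI(&MBB, DL, TII.get(Hexagon::J2_jump)).addMBB(Target);
}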
unsigned getSchedClass() const
Return the scheduling class for this instruction.
unsigned getObjectAlignment(int ObjectIdx) const
Return the alignment of the specified stack object.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
Address of a global value.
initializer< Ty > init(const Ty &Val)
const int Hexagon_MEMW_OFFSET_MAX
Constants for Hexagon instructions.
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
unsigned const MachineRegisterInfo * MRI
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
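A tiny self-contained illustration of this helper together with Log2_32 (listed further below):
#include "llvm/Support/MathExtras.h"
#include <cassert>
int main() {
  assert(llvm::isPowerOf2_32(64));   // 64 is a power of two > 0
  assert(!llvm::isPowerOf2_32(0));   // zero is rejected
  assert(llvm::Log2_32(64) == 6);    // floor(log2(64)) == 6
  return 0;
}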
static cl::opt< bool > EnableALUForwarding("enable-alu-forwarding", cl::Hidden, cl::init(true), cl::desc("Enable vec alu forwarding"))
HazardRecognizer - This determines whether or not an instruction can be issued this cycle...
bool getIncrementValue(const MachineInstr &MI, int &Value) const override
If the instruction is an increment of a constant value, return the amount.
bool isCompare(QueryType Type=IgnoreBundle) const
Return true if this instruction is a comparison.
bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg, unsigned &SrcReg2, int &Mask, int &Value) const override
For a comparison instruction, return the source registers in SrcReg and SrcReg2 if having two registe...
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
bool validateBranchCond(const ArrayRef< MachineOperand > &Cond) const
MachineInstrBuilder & UseMI
size_t size() const
size - Get the array size.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
const char * getSeparatorString() const
bool isPredicable(const MachineInstr &MI) const override
Return true if the specified instruction can be predicated.
void addLiveOuts(const MachineBasicBlock &MBB)
Adds all live-out registers of basic block MBB.
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator begin()
DebugLoc findDebugLoc(instr_iterator MBBI)
Find the next valid DebugLoc starting at MBBI, skipping any DBG_VALUE and DBG_LABEL instructions...
bool isAccumulator(const MachineInstr &MI) const
bool getBaseAndOffsetPosition(const MachineInstr &MI, unsigned &BasePos, unsigned &OffsetPos) const override
For instructions with a base and offset, return the position of the base register and offset operands...
bool isZeroExtendingLoad(const MachineInstr &MI) const
short changeAddrMode_io_abs(short Opc) const
bool hasUncondBranch(const MachineBasicBlock *B) const
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
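A small sketch of the usual "visited set" idiom built on this return value: insert().second is false when the element was already present, which keeps a graph walk from revisiting blocks. The helper name visitPreds is hypothetical.
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
using namespace llvm;
// Hypothetical helper: depth-first walk over predecessors, visiting each
// basic block at most once.
static void visitPreds(MachineBasicBlock *MBB,
                       SmallPtrSetImpl<MachineBasicBlock *> &Visited) {
  if (!Visited.insert(MBB).second)
    return; // already visited
  for (MachineBasicBlock *Pred : MBB->predecessors())
    visitPreds(Pred, Visited);
}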
short changeAddrMode_abs_io(short Opc) const
unsigned getAddrMode(const MachineInstr &MI) const
int getNonDotCurOp(const MachineInstr &MI) const
bool invertAndChangeJumpTarget(MachineInstr &MI, MachineBasicBlock *NewTarget) const
void setMBB(MachineBasicBlock *MBB)
bool isTC2Early(const MachineInstr &MI) const
void stepBackward(const MachineInstr &MI)
Simulates liveness when stepping backwards over an instruction (bundle).
Address of a basic block.
static bool isDblRegForSubInst(unsigned Reg, const HexagonRegisterInfo &HRI)
bool isValidAutoIncImm(const EVT VT, const int Offset) const
StringRef getCommentString() const
bool isFloat(const MachineInstr &MI) const
void setImm(int64_t immVal)
self_iterator getIterator()
bool isPredicable() const
Return true if this instruction has a predicate operand that controls execution.
iterator_range< pred_iterator > predecessors()
HexagonII::SubInstructionGroup getDuplexCandidateGroup(const MachineInstr &MI) const
void genAllInsnTimingClasses(MachineFunction &MF) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
short getPseudoInstrPair(const MachineInstr &MI) const
MCSubRegIterator enumerates all sub-registers of Reg.
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
bool isEarlySourceInstr(const MachineInstr &MI) const
bool isPostIncrement(const MachineInstr &MI) const override
Return true for post-incremented instructions.
bool isDebugInstr() const
bool isExtendable(const MachineInstr &MI) const
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
MO_LO16 - On a symbol operand, this represents a relocation containing lower 16 bit of the address...
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
static cl::opt< bool > EnableACCForwarding("enable-acc-forwarding", cl::Hidden, cl::init(true), cl::desc("Enable vec acc forwarding"))
bool isPredicated(const MachineInstr &MI) const override
Returns true if the instruction is already predicated.
void setIsKill(bool Val=true)
bool hasPseudoInstrPair(const MachineInstr &MI) const
The memory access writes data.
bool isIndirectCall(const MachineInstr &MI) const
bool isTC4x(const MachineInstr &MI) const
bool isDotCurInst(const MachineInstr &MI) const
static bool isUndef(ArrayRef< int > Mask)
static bool isIntRegForSubInst(unsigned Reg)
bool isConditionalBranch(QueryType Type=AnyInBundle) const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
virtual bool hasLoadFromStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand *> &Accesses) const
If the specified machine instruction has a load from a stack slot, return true along with the FrameIn...
Iterator for intrusive lists based on ilist_node.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
bool isJTI() const
isJTI - Tests if this is a MO_JumpTableIndex operand.
void setDesc(const MCInstrDesc &tid)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one...
bool isNewValueStore(const MachineInstr &MI) const
HexagonInstrInfo(HexagonSubtarget &ST)
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
Decompose the machine operand's target flags into two values - the direct target flag value and any o...
bool usePredicatedCalls() const
bool isNewValueInst(const MachineInstr &MI) const
bool available(const MachineRegisterInfo &MRI, MCPhysReg Reg) const
Returns true if register Reg and no aliasing register is in the set.
MachineOperand class - Representation of each machine instruction operand.
bool isVecAcc(const MachineInstr &MI) const
MachineInstrBuilder & DefMI
bool hasEHLabel(const MachineBasicBlock *B) const
bool isJumpWithinBranchRange(const MachineInstr &MI, unsigned offset) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
unsigned getUnits(const MachineInstr &MI) const
short getRegForm(const MachineInstr &MI) const
unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
If the specified machine instruction is a direct store to a stack slot, return the virtual or physica...
static void getLiveRegsAt(LivePhysRegs &Regs, const MachineInstr &MI)
MO_PCREL - On a symbol operand, indicates a PC-relative relocation Used for computing a global addres...
uint64_t getType(const MachineInstr &MI) const
bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
Insert branch code into the end of the specified MachineBasicBlock.
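A hedged sketch of a common retargeting pattern that pairs this hook with removeBranch (listed further below): drop the existing terminators, then insert a single unconditional branch. The helper name replaceWithJumpTo is hypothetical and the pattern is generic TargetInstrInfo usage, not Hexagon-specific logic.
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/DebugLoc.h"
using namespace llvm;
// Hypothetical helper: make MBB end in one plain jump to NewTarget.
static void replaceWithJumpTo(MachineBasicBlock &MBB,
                              MachineBasicBlock *NewTarget,
                              const TargetInstrInfo &TII, const DebugLoc &DL) {
  TII.removeBranch(MBB);                                 // drop old terminators
  TII.insertBranch(MBB, NewTarget, /*FBB=*/nullptr,
                   /*Cond=*/{}, DL);                     // unconditional branch
}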
bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, BranchProbability Probability) const override
Return true if it's profitable to predicate instructions with accumulated instruction latency of "Num...
bool isSignExtendingLoad(const MachineInstr &MI) const
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
bool isOperandExtended(const MachineInstr &MI, unsigned OperandNum) const
unsigned getInlineAsmLength(const char *Str, const MCAsmInfo &MAI) const override
Measure the specified inline asm to determine an approximation of its length.
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
bool isDuplexPair(const MachineInstr &MIa, const MachineInstr &MIb) const
Symmetrical. See if these two instructions are a fit for a duplex pair.
bool isBlockAddress() const
isBlockAddress - Tests if this is a MO_BlockAddress operand.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
bool isLateSourceInstr(const MachineInstr &MI) const
bool isSpillPredRegOp(const MachineInstr &MI) const
bool getMemOperandWithOffset(MachineInstr &LdSt, MachineOperand *&BaseOp, int64_t &Offset, const TargetRegisterInfo *TRI) const override
Get the base register and byte offset of a load/store instr.
unsigned getMaxInstLength() const
static bool isDuplexPairMatch(unsigned Ga, unsigned Gb)
unsigned const TypeCVI_FIRST
unsigned createVR(MachineFunction *MF, MVT VT) const
HexagonInstrInfo specifics.
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
Test if the given instruction should be considered a scheduling boundary.
unsigned reversePrediction(unsigned Opcode) const
bool isValid() const
isValid - returns true if this iterator is not yet at the end.
bool isNewValue(const MachineInstr &MI) const
int findRegisterDefOperandIdx(unsigned Reg, bool isDead=false, bool Overlap=false, const TargetRegisterInfo *TRI=nullptr) const
Returns the operand index that is a def of the specified register or -1 if it is not found...
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
bool isSaveCalleeSavedRegsCall(const MachineInstr &MI) const
bool readsRegister(unsigned Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr reads the specified register.
static LLVM_ATTRIBUTE_UNUSED unsigned getMemAccessSizeInBytes(MemAccessSize S)
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
Remove the branching code at the end of the specific MBB.
bool mayStore() const
Return true if this instruction could possibly modify memory.
unsigned const TypeCVI_LAST
unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
TargetInstrInfo overrides.
unsigned succ_size() const
int getDotNewOp(const MachineInstr &MI) const
const MachineBasicBlock * getParent() const
int getCondOpcode(int Opc, bool sense) const
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, bool KillSrc) const override
Emit instructions to copy a pair of physical registers.
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
TargetSubtargetInfo - Generic base class for all target subtargets.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
BranchProbability getEdgeProbability(const MachineBasicBlock *Src, const MachineBasicBlock *Dst) const
bool isPredicateLate(unsigned Opcode) const
Representation of each machine instruction.
bool addLatencyToSchedule(const MachineInstr &MI1, const MachineInstr &MI2) const
static bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
bool isDependent(const MachineInstr &ProdMI, const MachineInstr &ConsMI) const
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator end()
void addTargetFlag(unsigned F)
static unsigned nonDbgMICount(MachineBasicBlock::const_instr_iterator MIB, MachineBasicBlock::const_instr_iterator MIE)
Calculate the number of instructions, excluding debug instructions.
virtual bool hasStoreToStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand *> &Accesses) const
If the specified machine instruction has a store to a stack slot, return true along with the FrameInd...
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const int Hexagon_MEMB_OFFSET_MAX
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
A set of physical registers with utility functions to track liveness when walking backward/forward th...
LLVM_NODISCARD bool empty() const
These values represent a non-pipelined step in the execution of an instruction.
Represents a single loop in the control flow graph.
int getDotOldOp(const MachineInstr &MI) const
int64_t getOffset() const
Return the offset from the symbol in this operand.
bool isValidOffset(unsigned Opcode, int Offset, const TargetRegisterInfo *TRI, bool Extend=true) const
static MachineOperand CreateImm(int64_t Val)
bool mayBeNewStore(const MachineInstr &MI) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
bool isPredicatedTrue(const MachineInstr &MI) const
const int Hexagon_MEMW_OFFSET_MIN
bool isNewValueJump(const MachineInstr &MI) const
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool hasNonExtEquivalent(const MachineInstr &MI) const
unsigned getInstrTimingClassLatency(const InstrItineraryData *ItinData, const MachineInstr &MI) const
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with.
int getOperandLatency(const InstrItineraryData *ItinData, const MachineInstr &DefMI, unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const override
getOperandLatency - Compute and return the use operand latency of a given pair of def and use...
bool isSymbol() const
isSymbol - Tests if this is a MO_ExternalSymbol operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Instructions::iterator instr_iterator
const std::string to_string(const T &Value)
bool isTC2(const MachineInstr &MI) const
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
static bool isBranch(unsigned Opcode)
bool is_TC4x(unsigned SchedClass)
bool isLoopN(const MachineInstr &MI) const
bool isHVXMemWithAIndirect(const MachineInstr &I, const MachineInstr &J) const
bool isTerminator() const
Returns true if this instruction is part of the terminator for a basic block.
LLVM Value Representation.
bool PredOpcodeHasJMP_c(unsigned Opcode) const
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
bool mayBeCurLoad(const MachineInstr &MI) const
bool isAddrModeWithOffset(const MachineInstr &MI) const
unsigned getOpcode() const
Return the opcode number for this descriptor.
static cl::opt< bool > EnableTimingClassLatency("enable-timing-class-latency", cl::Hidden, cl::init(false), cl::desc("Enable timing class latency"))
short changeAddrMode_pi_io(short Opc) const
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore...
StringRef - Represent a constant reference to a string, i.e.
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
void RemoveOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with...
Address of indexed Constant in Constant Pool.
bool isMemOp(const MachineInstr &MI) const
MachineBasicBlock::instr_iterator getBundleEnd(MachineBasicBlock::instr_iterator I)
Returns an iterator pointing beyond the bundle containing I.
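A small sketch of iterating the instructions inside a bundle with this function, assuming MII points at the BUNDLE header instruction; the helper name countBundledInstrs is hypothetical.
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
using namespace llvm;
// Hypothetical helper: count the instructions bundled under the header at MII.
static unsigned countBundledInstrs(MachineBasicBlock::instr_iterator MII) {
  MachineBasicBlock::instr_iterator E = getBundleEnd(MII);
  unsigned N = 0;
  for (++MII; MII != E; ++MII) // skip the BUNDLE header itself
    ++N;
  return N;
}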
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
const MachineOperand & getOperand(unsigned i) const
bool isBaseImmOffset(const MachineInstr &MI) const
bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst, MachineInstr *&CmpInst) const override
Analyze the loop code, return true if it cannot be understood.
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
int findRegisterUseOperandIdx(unsigned Reg, bool isKill=false, const TargetRegisterInfo *TRI=nullptr) const
Returns the operand index that is a use of the specific register or -1 if it is not found...
Instructions::const_iterator const_instr_iterator
virtual int getOperandLatency(const InstrItineraryData *ItinData, SDNode *DefNode, unsigned DefIdx, SDNode *UseNode, unsigned UseIdx) const
bool isIndirectL4Return(const MachineInstr &MI) const
bool areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA=nullptr) const override
unsigned createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
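A hedged sketch of creating a fresh virtual register; the choice of Hexagon's 32-bit integer register class and the helper name makeScratchVReg are illustrative assumptions, and the Hexagon backend's register-class definitions are assumed to be in scope.
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;
// Hypothetical helper: allocate a new 32-bit integer virtual register.
static unsigned makeScratchVReg(MachineFunction &MF) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  return MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
}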
MO_HI16 - On a symbol operand, this represents a relocation containing higher 16 bit of the address...
bool empty() const
empty - Check if the array is empty.
const int Hexagon_MEMB_OFFSET_MIN
cl::opt< bool > ScheduleInlineAsm("hexagon-sched-inline-asm", cl::Hidden, cl::init(false), cl::desc("Do not consider inline-asm a scheduling/" "packetization boundary."))
unsigned getSize() const
Return the number of bytes in the encoding of this instruction, or zero if the encoding size cannot b...
HexagonII::CompoundGroup getCompoundCandidateGroup(const MachineInstr &MI) const