LLVM  8.0.1
HexagonInstrInfo.cpp
Go to the documentation of this file.
1 //===- HexagonInstrInfo.cpp - Hexagon Instruction Information -------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the Hexagon implementation of the TargetInstrInfo class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "HexagonInstrInfo.h"
15 #include "Hexagon.h"
16 #include "HexagonFrameLowering.h"
18 #include "HexagonRegisterInfo.h"
19 #include "HexagonSubtarget.h"
20 #include "llvm/ADT/ArrayRef.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/StringRef.h"
42 #include "llvm/IR/DebugLoc.h"
43 #include "llvm/MC/MCAsmInfo.h"
44 #include "llvm/MC/MCInstrDesc.h"
46 #include "llvm/MC/MCRegisterInfo.h"
49 #include "llvm/Support/Debug.h"
55 #include <cassert>
56 #include <cctype>
57 #include <cstdint>
58 #include <cstring>
59 #include <iterator>
60 #include <string>
61 #include <utility>
62 
63 using namespace llvm;
64 
65 #define DEBUG_TYPE "hexagon-instrinfo"
66 
67 #define GET_INSTRINFO_CTOR_DTOR
68 #define GET_INSTRMAP_INFO
70 #include "HexagonGenDFAPacketizer.inc"
71 #include "HexagonGenInstrInfo.inc"
72 
73 cl::opt<bool> ScheduleInlineAsm("hexagon-sched-inline-asm", cl::Hidden,
74  cl::init(false), cl::desc("Do not consider inline-asm a scheduling/"
75  "packetization boundary."));
76 
77 static cl::opt<bool> EnableBranchPrediction("hexagon-enable-branch-prediction",
78  cl::Hidden, cl::init(true), cl::desc("Enable branch prediction"));
79 
80 static cl::opt<bool> DisableNVSchedule("disable-hexagon-nv-schedule",
82  cl::desc("Disable schedule adjustment for new value stores."));
83 
85  "enable-timing-class-latency", cl::Hidden, cl::init(false),
86  cl::desc("Enable timing class latency"));
87 
89  "enable-alu-forwarding", cl::Hidden, cl::init(true),
90  cl::desc("Enable vec alu forwarding"));
91 
93  "enable-acc-forwarding", cl::Hidden, cl::init(true),
94  cl::desc("Enable vec acc forwarding"));
95 
96 static cl::opt<bool> BranchRelaxAsmLarge("branch-relax-asm-large",
97  cl::init(true), cl::Hidden, cl::ZeroOrMore, cl::desc("branch relax asm"));
98 
99 static cl::opt<bool> UseDFAHazardRec("dfa-hazard-rec",
101  cl::desc("Use the DFA based hazard recognizer."));
102 
/// Constants for Hexagon instructions: min/max signed immediate offsets for
/// the various memory-access widths (W = word, D = doubleword, H = halfword,
/// B = byte) and for add-immediate.
/// NOTE(review): the values look like signed immediate ranges scaled by the
/// access size — confirm against the Hexagon PRM instruction encodings.
const int Hexagon_MEMW_OFFSET_MAX = 4095;
const int Hexagon_MEMW_OFFSET_MIN = -4096;
const int Hexagon_MEMD_OFFSET_MAX = 8191;
const int Hexagon_MEMD_OFFSET_MIN = -8192;
const int Hexagon_MEMH_OFFSET_MAX = 2047;
const int Hexagon_MEMH_OFFSET_MIN = -2048;
const int Hexagon_MEMB_OFFSET_MAX = 1023;
const int Hexagon_MEMB_OFFSET_MIN = -1024;
const int Hexagon_ADDI_OFFSET_MAX = 32767;
const int Hexagon_ADDI_OFFSET_MIN = -32768;
114 
115 // Pin the vtable to this file.
116 void HexagonInstrInfo::anchor() {}
117 
119  : HexagonGenInstrInfo(Hexagon::ADJCALLSTACKDOWN, Hexagon::ADJCALLSTACKUP),
120  Subtarget(ST) {}
121 
122 static bool isIntRegForSubInst(unsigned Reg) {
123  return (Reg >= Hexagon::R0 && Reg <= Hexagon::R7) ||
124  (Reg >= Hexagon::R16 && Reg <= Hexagon::R23);
125 }
126 
127 static bool isDblRegForSubInst(unsigned Reg, const HexagonRegisterInfo &HRI) {
128  return isIntRegForSubInst(HRI.getSubReg(Reg, Hexagon::isub_lo)) &&
129  isIntRegForSubInst(HRI.getSubReg(Reg, Hexagon::isub_hi));
130 }
131 
132 /// Calculate number of instructions excluding the debug instructions.
135  unsigned Count = 0;
136  for (; MIB != MIE; ++MIB) {
137  if (!MIB->isDebugInstr())
138  ++Count;
139  }
140  return Count;
141 }
142 
143 /// Find the hardware loop instruction used to set-up the specified loop.
144 /// On Hexagon, we have two instructions used to set-up the hardware loop
145 /// (LOOP0, LOOP1) with corresponding endloop (ENDLOOP0, ENDLOOP1) instructions
146 /// to indicate the end of a loop.
148  unsigned EndLoopOp, MachineBasicBlock *TargetBB,
149  SmallPtrSet<MachineBasicBlock *, 8> &Visited) const {
150  unsigned LOOPi;
151  unsigned LOOPr;
152  if (EndLoopOp == Hexagon::ENDLOOP0) {
153  LOOPi = Hexagon::J2_loop0i;
154  LOOPr = Hexagon::J2_loop0r;
155  } else { // EndLoopOp == Hexagon::EndLOOP1
156  LOOPi = Hexagon::J2_loop1i;
157  LOOPr = Hexagon::J2_loop1r;
158  }
159 
160  // The loop set-up instruction will be in a predecessor block
161  for (MachineBasicBlock *PB : BB->predecessors()) {
162  // If this has been visited, already skip it.
163  if (!Visited.insert(PB).second)
164  continue;
165  if (PB == BB)
166  continue;
167  for (auto I = PB->instr_rbegin(), E = PB->instr_rend(); I != E; ++I) {
168  unsigned Opc = I->getOpcode();
169  if (Opc == LOOPi || Opc == LOOPr)
170  return &*I;
171  // We've reached a different loop, which means the loop01 has been
172  // removed.
173  if (Opc == EndLoopOp && I->getOperand(0).getMBB() != TargetBB)
174  return nullptr;
175  }
176  // Check the predecessors for the LOOP instruction.
177  if (MachineInstr *Loop = findLoopInstr(PB, EndLoopOp, TargetBB, Visited))
178  return Loop;
179  }
180  return nullptr;
181 }
182 
183 /// Gather register def/uses from MI.
184 /// This treats possible (predicated) defs as actually happening ones
185 /// (conservatively).
186 static inline void parseOperands(const MachineInstr &MI,
188  Defs.clear();
189  Uses.clear();
190 
191  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
192  const MachineOperand &MO = MI.getOperand(i);
193 
194  if (!MO.isReg())
195  continue;
196 
197  unsigned Reg = MO.getReg();
198  if (!Reg)
199  continue;
200 
201  if (MO.isUse())
202  Uses.push_back(MO.getReg());
203 
204  if (MO.isDef())
205  Defs.push_back(MO.getReg());
206  }
207 }
208 
209 // Position dependent, so check twice for swap.
210 static bool isDuplexPairMatch(unsigned Ga, unsigned Gb) {
211  switch (Ga) {
213  default:
214  return false;
215  case HexagonII::HSIG_L1:
216  return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_A);
217  case HexagonII::HSIG_L2:
218  return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
219  Gb == HexagonII::HSIG_A);
220  case HexagonII::HSIG_S1:
221  return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
222  Gb == HexagonII::HSIG_S1 || Gb == HexagonII::HSIG_A);
223  case HexagonII::HSIG_S2:
224  return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
225  Gb == HexagonII::HSIG_S1 || Gb == HexagonII::HSIG_S2 ||
226  Gb == HexagonII::HSIG_A);
227  case HexagonII::HSIG_A:
228  return (Gb == HexagonII::HSIG_A);
230  return (Gb == HexagonII::HSIG_Compound);
231  }
232  return false;
233 }
234 
235 /// isLoadFromStackSlot - If the specified machine instruction is a direct
236 /// load from a stack slot, return the virtual or physical register number of
237 /// the destination along with the FrameIndex of the loaded stack slot. If
238 /// not, return 0. This predicate must return 0 if the instruction has
239 /// any side effects other than loading from the stack slot.
241  int &FrameIndex) const {
242  switch (MI.getOpcode()) {
243  default:
244  break;
245  case Hexagon::L2_loadri_io:
246  case Hexagon::L2_loadrd_io:
247  case Hexagon::V6_vL32b_ai:
248  case Hexagon::V6_vL32b_nt_ai:
249  case Hexagon::V6_vL32Ub_ai:
250  case Hexagon::LDriw_pred:
251  case Hexagon::LDriw_ctr:
252  case Hexagon::PS_vloadrq_ai:
253  case Hexagon::PS_vloadrw_ai:
254  case Hexagon::PS_vloadrw_nt_ai: {
255  const MachineOperand OpFI = MI.getOperand(1);
256  if (!OpFI.isFI())
257  return 0;
258  const MachineOperand OpOff = MI.getOperand(2);
259  if (!OpOff.isImm() || OpOff.getImm() != 0)
260  return 0;
261  FrameIndex = OpFI.getIndex();
262  return MI.getOperand(0).getReg();
263  }
264 
265  case Hexagon::L2_ploadrit_io:
266  case Hexagon::L2_ploadrif_io:
267  case Hexagon::L2_ploadrdt_io:
268  case Hexagon::L2_ploadrdf_io: {
269  const MachineOperand OpFI = MI.getOperand(2);
270  if (!OpFI.isFI())
271  return 0;
272  const MachineOperand OpOff = MI.getOperand(3);
273  if (!OpOff.isImm() || OpOff.getImm() != 0)
274  return 0;
275  FrameIndex = OpFI.getIndex();
276  return MI.getOperand(0).getReg();
277  }
278  }
279 
280  return 0;
281 }
282 
283 /// isStoreToStackSlot - If the specified machine instruction is a direct
284 /// store to a stack slot, return the virtual or physical register number of
285 /// the source reg along with the FrameIndex of the loaded stack slot. If
286 /// not, return 0. This predicate must return 0 if the instruction has
287 /// any side effects other than storing to the stack slot.
289  int &FrameIndex) const {
290  switch (MI.getOpcode()) {
291  default:
292  break;
293  case Hexagon::S2_storerb_io:
294  case Hexagon::S2_storerh_io:
295  case Hexagon::S2_storeri_io:
296  case Hexagon::S2_storerd_io:
297  case Hexagon::V6_vS32b_ai:
298  case Hexagon::V6_vS32Ub_ai:
299  case Hexagon::STriw_pred:
300  case Hexagon::STriw_ctr:
301  case Hexagon::PS_vstorerq_ai:
302  case Hexagon::PS_vstorerw_ai: {
303  const MachineOperand &OpFI = MI.getOperand(0);
304  if (!OpFI.isFI())
305  return 0;
306  const MachineOperand &OpOff = MI.getOperand(1);
307  if (!OpOff.isImm() || OpOff.getImm() != 0)
308  return 0;
309  FrameIndex = OpFI.getIndex();
310  return MI.getOperand(2).getReg();
311  }
312 
313  case Hexagon::S2_pstorerbt_io:
314  case Hexagon::S2_pstorerbf_io:
315  case Hexagon::S2_pstorerht_io:
316  case Hexagon::S2_pstorerhf_io:
317  case Hexagon::S2_pstorerit_io:
318  case Hexagon::S2_pstorerif_io:
319  case Hexagon::S2_pstorerdt_io:
320  case Hexagon::S2_pstorerdf_io: {
321  const MachineOperand &OpFI = MI.getOperand(1);
322  if (!OpFI.isFI())
323  return 0;
324  const MachineOperand &OpOff = MI.getOperand(2);
325  if (!OpOff.isImm() || OpOff.getImm() != 0)
326  return 0;
327  FrameIndex = OpFI.getIndex();
328  return MI.getOperand(3).getReg();
329  }
330  }
331 
332  return 0;
333 }
334 
335 /// This function checks if the instruction or bundle of instructions
336 /// has load from stack slot and returns frameindex and machine memory
337 /// operand of that instruction if true.
339  const MachineInstr &MI,
341  if (MI.isBundle()) {
342  const MachineBasicBlock *MBB = MI.getParent();
344  for (++MII; MII != MBB->instr_end() && MII->isInsideBundle(); ++MII)
345  if (TargetInstrInfo::hasLoadFromStackSlot(*MII, Accesses))
346  return true;
347  return false;
348  }
349 
350  return TargetInstrInfo::hasLoadFromStackSlot(MI, Accesses);
351 }
352 
353 /// This function checks if the instruction or bundle of instructions
354 /// has store to stack slot and returns frameindex and machine memory
355 /// operand of that instruction if true.
357  const MachineInstr &MI,
359  if (MI.isBundle()) {
360  const MachineBasicBlock *MBB = MI.getParent();
362  for (++MII; MII != MBB->instr_end() && MII->isInsideBundle(); ++MII)
363  if (TargetInstrInfo::hasStoreToStackSlot(*MII, Accesses))
364  return true;
365  return false;
366  }
367 
368  return TargetInstrInfo::hasStoreToStackSlot(MI, Accesses);
369 }
370 
371 /// This function can analyze one/two way branching only and should (mostly) be
372 /// called by target independent side.
373 /// First entry is always the opcode of the branching instruction, except when
374 /// the Cond vector is supposed to be empty, e.g., when AnalyzeBranch fails, a
375 /// BB with only unconditional jump. Subsequent entries depend upon the opcode,
376 /// e.g. Jump_c p will have
377 /// Cond[0] = Jump_c
378 /// Cond[1] = p
379 /// HW-loop ENDLOOP:
380 /// Cond[0] = ENDLOOP
381 /// Cond[1] = MBB
382 /// New value jump:
383 /// Cond[0] = Hexagon::CMPEQri_f_Jumpnv_t_V4 -- specific opcode
384 /// Cond[1] = R
385 /// Cond[2] = Imm
387  MachineBasicBlock *&TBB,
388  MachineBasicBlock *&FBB,
390  bool AllowModify) const {
391  TBB = nullptr;
392  FBB = nullptr;
393  Cond.clear();
394 
395  // If the block has no terminators, it just falls into the block after it.
397  if (I == MBB.instr_begin())
398  return false;
399 
400  // A basic block may looks like this:
401  //
402  // [ insn
403  // EH_LABEL
404  // insn
405  // insn
406  // insn
407  // EH_LABEL
408  // insn ]
409  //
410  // It has two succs but does not have a terminator
411  // Don't know how to handle it.
412  do {
413  --I;
414  if (I->isEHLabel())
415  // Don't analyze EH branches.
416  return true;
417  } while (I != MBB.instr_begin());
418 
419  I = MBB.instr_end();
420  --I;
421 
422  while (I->isDebugInstr()) {
423  if (I == MBB.instr_begin())
424  return false;
425  --I;
426  }
427 
428  bool JumpToBlock = I->getOpcode() == Hexagon::J2_jump &&
429  I->getOperand(0).isMBB();
430  // Delete the J2_jump if it's equivalent to a fall-through.
431  if (AllowModify && JumpToBlock &&
432  MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
433  LLVM_DEBUG(dbgs() << "\nErasing the jump to successor block\n";);
434  I->eraseFromParent();
435  I = MBB.instr_end();
436  if (I == MBB.instr_begin())
437  return false;
438  --I;
439  }
440  if (!isUnpredicatedTerminator(*I))
441  return false;
442 
443  // Get the last instruction in the block.
444  MachineInstr *LastInst = &*I;
445  MachineInstr *SecondLastInst = nullptr;
446  // Find one more terminator if present.
447  while (true) {
448  if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(*I)) {
449  if (!SecondLastInst)
450  SecondLastInst = &*I;
451  else
452  // This is a third branch.
453  return true;
454  }
455  if (I == MBB.instr_begin())
456  break;
457  --I;
458  }
459 
460  int LastOpcode = LastInst->getOpcode();
461  int SecLastOpcode = SecondLastInst ? SecondLastInst->getOpcode() : 0;
462  // If the branch target is not a basic block, it could be a tail call.
463  // (It is, if the target is a function.)
464  if (LastOpcode == Hexagon::J2_jump && !LastInst->getOperand(0).isMBB())
465  return true;
466  if (SecLastOpcode == Hexagon::J2_jump &&
467  !SecondLastInst->getOperand(0).isMBB())
468  return true;
469 
470  bool LastOpcodeHasJMP_c = PredOpcodeHasJMP_c(LastOpcode);
471  bool LastOpcodeHasNVJump = isNewValueJump(*LastInst);
472 
473  if (LastOpcodeHasJMP_c && !LastInst->getOperand(1).isMBB())
474  return true;
475 
476  // If there is only one terminator instruction, process it.
477  if (LastInst && !SecondLastInst) {
478  if (LastOpcode == Hexagon::J2_jump) {
479  TBB = LastInst->getOperand(0).getMBB();
480  return false;
481  }
482  if (isEndLoopN(LastOpcode)) {
483  TBB = LastInst->getOperand(0).getMBB();
484  Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
485  Cond.push_back(LastInst->getOperand(0));
486  return false;
487  }
488  if (LastOpcodeHasJMP_c) {
489  TBB = LastInst->getOperand(1).getMBB();
490  Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
491  Cond.push_back(LastInst->getOperand(0));
492  return false;
493  }
494  // Only supporting rr/ri versions of new-value jumps.
495  if (LastOpcodeHasNVJump && (LastInst->getNumExplicitOperands() == 3)) {
496  TBB = LastInst->getOperand(2).getMBB();
497  Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
498  Cond.push_back(LastInst->getOperand(0));
499  Cond.push_back(LastInst->getOperand(1));
500  return false;
501  }
502  LLVM_DEBUG(dbgs() << "\nCant analyze " << printMBBReference(MBB)
503  << " with one jump\n";);
504  // Otherwise, don't know what this is.
505  return true;
506  }
507 
508  bool SecLastOpcodeHasJMP_c = PredOpcodeHasJMP_c(SecLastOpcode);
509  bool SecLastOpcodeHasNVJump = isNewValueJump(*SecondLastInst);
510  if (SecLastOpcodeHasJMP_c && (LastOpcode == Hexagon::J2_jump)) {
511  if (!SecondLastInst->getOperand(1).isMBB())
512  return true;
513  TBB = SecondLastInst->getOperand(1).getMBB();
514  Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
515  Cond.push_back(SecondLastInst->getOperand(0));
516  FBB = LastInst->getOperand(0).getMBB();
517  return false;
518  }
519 
520  // Only supporting rr/ri versions of new-value jumps.
521  if (SecLastOpcodeHasNVJump &&
522  (SecondLastInst->getNumExplicitOperands() == 3) &&
523  (LastOpcode == Hexagon::J2_jump)) {
524  TBB = SecondLastInst->getOperand(2).getMBB();
525  Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
526  Cond.push_back(SecondLastInst->getOperand(0));
527  Cond.push_back(SecondLastInst->getOperand(1));
528  FBB = LastInst->getOperand(0).getMBB();
529  return false;
530  }
531 
532  // If the block ends with two Hexagon:JMPs, handle it. The second one is not
533  // executed, so remove it.
534  if (SecLastOpcode == Hexagon::J2_jump && LastOpcode == Hexagon::J2_jump) {
535  TBB = SecondLastInst->getOperand(0).getMBB();
536  I = LastInst->getIterator();
537  if (AllowModify)
538  I->eraseFromParent();
539  return false;
540  }
541 
542  // If the block ends with an ENDLOOP, and J2_jump, handle it.
543  if (isEndLoopN(SecLastOpcode) && LastOpcode == Hexagon::J2_jump) {
544  TBB = SecondLastInst->getOperand(0).getMBB();
545  Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
546  Cond.push_back(SecondLastInst->getOperand(0));
547  FBB = LastInst->getOperand(0).getMBB();
548  return false;
549  }
550  LLVM_DEBUG(dbgs() << "\nCant analyze " << printMBBReference(MBB)
551  << " with two jumps";);
552  // Otherwise, can't handle this.
553  return true;
554 }
555 
557  int *BytesRemoved) const {
558  assert(!BytesRemoved && "code size not handled");
559 
560  LLVM_DEBUG(dbgs() << "\nRemoving branches out of " << printMBBReference(MBB));
562  unsigned Count = 0;
563  while (I != MBB.begin()) {
564  --I;
565  if (I->isDebugInstr())
566  continue;
567  // Only removing branches from end of MBB.
568  if (!I->isBranch())
569  return Count;
570  if (Count && (I->getOpcode() == Hexagon::J2_jump))
571  llvm_unreachable("Malformed basic block: unconditional branch not last");
572  MBB.erase(&MBB.back());
573  I = MBB.end();
574  ++Count;
575  }
576  return Count;
577 }
578 
580  MachineBasicBlock *TBB,
581  MachineBasicBlock *FBB,
583  const DebugLoc &DL,
584  int *BytesAdded) const {
585  unsigned BOpc = Hexagon::J2_jump;
586  unsigned BccOpc = Hexagon::J2_jumpt;
587  assert(validateBranchCond(Cond) && "Invalid branching condition");
588  assert(TBB && "insertBranch must not be told to insert a fallthrough");
589  assert(!BytesAdded && "code size not handled");
590 
591  // Check if reverseBranchCondition has asked to reverse this branch
592  // If we want to reverse the branch an odd number of times, we want
593  // J2_jumpf.
594  if (!Cond.empty() && Cond[0].isImm())
595  BccOpc = Cond[0].getImm();
596 
597  if (!FBB) {
598  if (Cond.empty()) {
599  // Due to a bug in TailMerging/CFG Optimization, we need to add a
600  // special case handling of a predicated jump followed by an
601  // unconditional jump. If not, Tail Merging and CFG Optimization go
602  // into an infinite loop.
603  MachineBasicBlock *NewTBB, *NewFBB;
605  auto Term = MBB.getFirstTerminator();
606  if (Term != MBB.end() && isPredicated(*Term) &&
607  !analyzeBranch(MBB, NewTBB, NewFBB, Cond, false) &&
608  MachineFunction::iterator(NewTBB) == ++MBB.getIterator()) {
610  removeBranch(MBB);
611  return insertBranch(MBB, TBB, nullptr, Cond, DL);
612  }
613  BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
614  } else if (isEndLoopN(Cond[0].getImm())) {
615  int EndLoopOp = Cond[0].getImm();
616  assert(Cond[1].isMBB());
617  // Since we're adding an ENDLOOP, there better be a LOOP instruction.
618  // Check for it, and change the BB target if needed.
620  MachineInstr *Loop = findLoopInstr(TBB, EndLoopOp, Cond[1].getMBB(),
621  VisitedBBs);
622  assert(Loop != nullptr && "Inserting an ENDLOOP without a LOOP");
623  Loop->getOperand(0).setMBB(TBB);
624  // Add the ENDLOOP after the finding the LOOP0.
625  BuildMI(&MBB, DL, get(EndLoopOp)).addMBB(TBB);
626  } else if (isNewValueJump(Cond[0].getImm())) {
627  assert((Cond.size() == 3) && "Only supporting rr/ri version of nvjump");
628  // New value jump
629  // (ins IntRegs:$src1, IntRegs:$src2, brtarget:$offset)
630  // (ins IntRegs:$src1, u5Imm:$src2, brtarget:$offset)
631  unsigned Flags1 = getUndefRegState(Cond[1].isUndef());
632  LLVM_DEBUG(dbgs() << "\nInserting NVJump for "
633  << printMBBReference(MBB););
634  if (Cond[2].isReg()) {
635  unsigned Flags2 = getUndefRegState(Cond[2].isUndef());
636  BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[1].getReg(), Flags1).
637  addReg(Cond[2].getReg(), Flags2).addMBB(TBB);
638  } else if(Cond[2].isImm()) {
639  BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[1].getReg(), Flags1).
640  addImm(Cond[2].getImm()).addMBB(TBB);
641  } else
642  llvm_unreachable("Invalid condition for branching");
643  } else {
644  assert((Cond.size() == 2) && "Malformed cond vector");
645  const MachineOperand &RO = Cond[1];
646  unsigned Flags = getUndefRegState(RO.isUndef());
647  BuildMI(&MBB, DL, get(BccOpc)).addReg(RO.getReg(), Flags).addMBB(TBB);
648  }
649  return 1;
650  }
651  assert((!Cond.empty()) &&
652  "Cond. cannot be empty when multiple branchings are required");
653  assert((!isNewValueJump(Cond[0].getImm())) &&
654  "NV-jump cannot be inserted with another branch");
655  // Special case for hardware loops. The condition is a basic block.
656  if (isEndLoopN(Cond[0].getImm())) {
657  int EndLoopOp = Cond[0].getImm();
658  assert(Cond[1].isMBB());
659  // Since we're adding an ENDLOOP, there better be a LOOP instruction.
660  // Check for it, and change the BB target if needed.
662  MachineInstr *Loop = findLoopInstr(TBB, EndLoopOp, Cond[1].getMBB(),
663  VisitedBBs);
664  assert(Loop != nullptr && "Inserting an ENDLOOP without a LOOP");
665  Loop->getOperand(0).setMBB(TBB);
666  // Add the ENDLOOP after the finding the LOOP0.
667  BuildMI(&MBB, DL, get(EndLoopOp)).addMBB(TBB);
668  } else {
669  const MachineOperand &RO = Cond[1];
670  unsigned Flags = getUndefRegState(RO.isUndef());
671  BuildMI(&MBB, DL, get(BccOpc)).addReg(RO.getReg(), Flags).addMBB(TBB);
672  }
673  BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
674 
675  return 2;
676 }
677 
678 /// Analyze the loop code to find the loop induction variable and compare used
679 /// to compute the number of iterations. Currently, we analyze loop that are
680 /// controlled using hardware loops. In this case, the induction variable
681 /// instruction is null. For all other cases, this function returns true, which
682 /// means we're unable to analyze it.
684  MachineInstr *&IndVarInst,
685  MachineInstr *&CmpInst) const {
686 
687  MachineBasicBlock *LoopEnd = L.getBottomBlock();
689  // We really "analyze" only hardware loops right now.
690  if (I != LoopEnd->end() && isEndLoopN(I->getOpcode())) {
691  IndVarInst = nullptr;
692  CmpInst = &*I;
693  return false;
694  }
695  return true;
696 }
697 
698 /// Generate code to reduce the loop iteration by one and check if the loop is
699 /// finished. Return the value/register of the new loop count. this function
700 /// assumes the nth iteration is peeled first.
702  MachineInstr *IndVar, MachineInstr &Cmp,
705  unsigned Iter, unsigned MaxIter) const {
706  // We expect a hardware loop currently. This means that IndVar is set
707  // to null, and the compare is the ENDLOOP instruction.
708  assert((!IndVar) && isEndLoopN(Cmp.getOpcode())
709  && "Expecting a hardware loop");
710  MachineFunction *MF = MBB.getParent();
711  DebugLoc DL = Cmp.getDebugLoc();
713  MachineInstr *Loop = findLoopInstr(&MBB, Cmp.getOpcode(),
714  Cmp.getOperand(0).getMBB(), VisitedBBs);
715  if (!Loop)
716  return 0;
717  // If the loop trip count is a compile-time value, then just change the
718  // value.
719  if (Loop->getOpcode() == Hexagon::J2_loop0i ||
720  Loop->getOpcode() == Hexagon::J2_loop1i) {
721  int64_t Offset = Loop->getOperand(1).getImm();
722  if (Offset <= 1)
723  Loop->eraseFromParent();
724  else
725  Loop->getOperand(1).setImm(Offset - 1);
726  return Offset - 1;
727  }
728  // The loop trip count is a run-time value. We generate code to subtract
729  // one from the trip count, and update the loop instruction.
730  assert(Loop->getOpcode() == Hexagon::J2_loop0r && "Unexpected instruction");
731  unsigned LoopCount = Loop->getOperand(1).getReg();
732  // Check if we're done with the loop.
733  unsigned LoopEnd = createVR(MF, MVT::i1);
734  MachineInstr *NewCmp = BuildMI(&MBB, DL, get(Hexagon::C2_cmpgtui), LoopEnd).
735  addReg(LoopCount).addImm(1);
736  unsigned NewLoopCount = createVR(MF, MVT::i32);
737  MachineInstr *NewAdd = BuildMI(&MBB, DL, get(Hexagon::A2_addi), NewLoopCount).
738  addReg(LoopCount).addImm(-1);
739  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
740  // Update the previously generated instructions with the new loop counter.
742  E = PrevInsts.end(); I != E; ++I)
743  (*I)->substituteRegister(LoopCount, NewLoopCount, 0, HRI);
744  PrevInsts.clear();
745  PrevInsts.push_back(NewCmp);
746  PrevInsts.push_back(NewAdd);
747  // Insert the new loop instruction if this is the last time the loop is
748  // decremented.
749  if (Iter == MaxIter)
750  BuildMI(&MBB, DL, get(Hexagon::J2_loop0r)).
751  addMBB(Loop->getOperand(0).getMBB()).addReg(NewLoopCount);
752  // Delete the old loop instruction.
753  if (Iter == 0)
754  Loop->eraseFromParent();
755  Cond.push_back(MachineOperand::CreateImm(Hexagon::J2_jumpf));
756  Cond.push_back(NewCmp->getOperand(0));
757  return NewLoopCount;
758 }
759 
761  unsigned NumCycles, unsigned ExtraPredCycles,
762  BranchProbability Probability) const {
763  return nonDbgBBSize(&MBB) <= 3;
764 }
765 
767  unsigned NumTCycles, unsigned ExtraTCycles, MachineBasicBlock &FMBB,
768  unsigned NumFCycles, unsigned ExtraFCycles, BranchProbability Probability)
769  const {
770  return nonDbgBBSize(&TMBB) <= 3 && nonDbgBBSize(&FMBB) <= 3;
771 }
772 
774  unsigned NumInstrs, BranchProbability Probability) const {
775  return NumInstrs <= 4;
776 }
777 
780  const DebugLoc &DL, unsigned DestReg,
781  unsigned SrcReg, bool KillSrc) const {
782  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
783  unsigned KillFlag = getKillRegState(KillSrc);
784 
785  if (Hexagon::IntRegsRegClass.contains(SrcReg, DestReg)) {
786  BuildMI(MBB, I, DL, get(Hexagon::A2_tfr), DestReg)
787  .addReg(SrcReg, KillFlag);
788  return;
789  }
790  if (Hexagon::DoubleRegsRegClass.contains(SrcReg, DestReg)) {
791  BuildMI(MBB, I, DL, get(Hexagon::A2_tfrp), DestReg)
792  .addReg(SrcReg, KillFlag);
793  return;
794  }
795  if (Hexagon::PredRegsRegClass.contains(SrcReg, DestReg)) {
796  // Map Pd = Ps to Pd = or(Ps, Ps).
797  BuildMI(MBB, I, DL, get(Hexagon::C2_or), DestReg)
798  .addReg(SrcReg).addReg(SrcReg, KillFlag);
799  return;
800  }
801  if (Hexagon::CtrRegsRegClass.contains(DestReg) &&
802  Hexagon::IntRegsRegClass.contains(SrcReg)) {
803  BuildMI(MBB, I, DL, get(Hexagon::A2_tfrrcr), DestReg)
804  .addReg(SrcReg, KillFlag);
805  return;
806  }
807  if (Hexagon::IntRegsRegClass.contains(DestReg) &&
808  Hexagon::CtrRegsRegClass.contains(SrcReg)) {
809  BuildMI(MBB, I, DL, get(Hexagon::A2_tfrcrr), DestReg)
810  .addReg(SrcReg, KillFlag);
811  return;
812  }
813  if (Hexagon::ModRegsRegClass.contains(DestReg) &&
814  Hexagon::IntRegsRegClass.contains(SrcReg)) {
815  BuildMI(MBB, I, DL, get(Hexagon::A2_tfrrcr), DestReg)
816  .addReg(SrcReg, KillFlag);
817  return;
818  }
819  if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
820  Hexagon::IntRegsRegClass.contains(DestReg)) {
821  BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg)
822  .addReg(SrcReg, KillFlag);
823  return;
824  }
825  if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
826  Hexagon::PredRegsRegClass.contains(DestReg)) {
827  BuildMI(MBB, I, DL, get(Hexagon::C2_tfrrp), DestReg)
828  .addReg(SrcReg, KillFlag);
829  return;
830  }
831  if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
832  Hexagon::IntRegsRegClass.contains(DestReg)) {
833  BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg)
834  .addReg(SrcReg, KillFlag);
835  return;
836  }
837  if (Hexagon::HvxVRRegClass.contains(SrcReg, DestReg)) {
838  BuildMI(MBB, I, DL, get(Hexagon::V6_vassign), DestReg).
839  addReg(SrcReg, KillFlag);
840  return;
841  }
842  if (Hexagon::HvxWRRegClass.contains(SrcReg, DestReg)) {
843  unsigned LoSrc = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
844  unsigned HiSrc = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
845  BuildMI(MBB, I, DL, get(Hexagon::V6_vcombine), DestReg)
846  .addReg(HiSrc, KillFlag)
847  .addReg(LoSrc, KillFlag);
848  return;
849  }
850  if (Hexagon::HvxQRRegClass.contains(SrcReg, DestReg)) {
851  BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and), DestReg)
852  .addReg(SrcReg)
853  .addReg(SrcReg, KillFlag);
854  return;
855  }
856  if (Hexagon::HvxQRRegClass.contains(SrcReg) &&
857  Hexagon::HvxVRRegClass.contains(DestReg)) {
858  llvm_unreachable("Unimplemented pred to vec");
859  return;
860  }
861  if (Hexagon::HvxQRRegClass.contains(DestReg) &&
862  Hexagon::HvxVRRegClass.contains(SrcReg)) {
863  llvm_unreachable("Unimplemented vec to pred");
864  return;
865  }
866 
867 #ifndef NDEBUG
868  // Show the invalid registers to ease debugging.
869  dbgs() << "Invalid registers for copy in " << printMBBReference(MBB) << ": "
870  << printReg(DestReg, &HRI) << " = " << printReg(SrcReg, &HRI) << '\n';
871 #endif
872  llvm_unreachable("Unimplemented");
873 }
874 
876  MachineBasicBlock::iterator I, unsigned SrcReg, bool isKill, int FI,
877  const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const {
878  DebugLoc DL = MBB.findDebugLoc(I);
879  MachineFunction &MF = *MBB.getParent();
880  MachineFrameInfo &MFI = MF.getFrameInfo();
881  unsigned SlotAlign = MFI.getObjectAlignment(FI);
882  unsigned RegAlign = TRI->getSpillAlignment(*RC);
883  unsigned KillFlag = getKillRegState(isKill);
884  bool HasAlloca = MFI.hasVarSizedObjects();
885  const HexagonFrameLowering &HFI = *Subtarget.getFrameLowering();
886 
889  MFI.getObjectSize(FI), SlotAlign);
890 
891  if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
892  BuildMI(MBB, I, DL, get(Hexagon::S2_storeri_io))
893  .addFrameIndex(FI).addImm(0)
894  .addReg(SrcReg, KillFlag).addMemOperand(MMO);
895  } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
896  BuildMI(MBB, I, DL, get(Hexagon::S2_storerd_io))
897  .addFrameIndex(FI).addImm(0)
898  .addReg(SrcReg, KillFlag).addMemOperand(MMO);
899  } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
900  BuildMI(MBB, I, DL, get(Hexagon::STriw_pred))
901  .addFrameIndex(FI).addImm(0)
902  .addReg(SrcReg, KillFlag).addMemOperand(MMO);
903  } else if (Hexagon::ModRegsRegClass.hasSubClassEq(RC)) {
904  BuildMI(MBB, I, DL, get(Hexagon::STriw_ctr))
905  .addFrameIndex(FI).addImm(0)
906  .addReg(SrcReg, KillFlag).addMemOperand(MMO);
907  } else if (Hexagon::HvxQRRegClass.hasSubClassEq(RC)) {
908  BuildMI(MBB, I, DL, get(Hexagon::PS_vstorerq_ai))
909  .addFrameIndex(FI).addImm(0)
910  .addReg(SrcReg, KillFlag).addMemOperand(MMO);
911  } else if (Hexagon::HvxVRRegClass.hasSubClassEq(RC)) {
912  // If there are variable-sized objects, spills will not be aligned.
913  if (HasAlloca)
914  SlotAlign = HFI.getStackAlignment();
915  unsigned Opc = SlotAlign < RegAlign ? Hexagon::V6_vS32Ub_ai
916  : Hexagon::V6_vS32b_ai;
919  MFI.getObjectSize(FI), SlotAlign);
920  BuildMI(MBB, I, DL, get(Opc))
921  .addFrameIndex(FI).addImm(0)
922  .addReg(SrcReg, KillFlag).addMemOperand(MMOA);
923  } else if (Hexagon::HvxWRRegClass.hasSubClassEq(RC)) {
924  // If there are variable-sized objects, spills will not be aligned.
925  if (HasAlloca)
926  SlotAlign = HFI.getStackAlignment();
927  unsigned Opc = SlotAlign < RegAlign ? Hexagon::PS_vstorerwu_ai
928  : Hexagon::PS_vstorerw_ai;
931  MFI.getObjectSize(FI), SlotAlign);
932  BuildMI(MBB, I, DL, get(Opc))
933  .addFrameIndex(FI).addImm(0)
934  .addReg(SrcReg, KillFlag).addMemOperand(MMOA);
935  } else {
936  llvm_unreachable("Unimplemented");
937  }
938 }
939 
941  MachineBasicBlock &MBB, MachineBasicBlock::iterator I, unsigned DestReg,
942  int FI, const TargetRegisterClass *RC,
943  const TargetRegisterInfo *TRI) const {
944  DebugLoc DL = MBB.findDebugLoc(I);
945  MachineFunction &MF = *MBB.getParent();
946  MachineFrameInfo &MFI = MF.getFrameInfo();
947  unsigned SlotAlign = MFI.getObjectAlignment(FI);
948  unsigned RegAlign = TRI->getSpillAlignment(*RC);
949  bool HasAlloca = MFI.hasVarSizedObjects();
950  const HexagonFrameLowering &HFI = *Subtarget.getFrameLowering();
951 
954  MFI.getObjectSize(FI), SlotAlign);
955 
956  if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
957  BuildMI(MBB, I, DL, get(Hexagon::L2_loadri_io), DestReg)
958  .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
959  } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
960  BuildMI(MBB, I, DL, get(Hexagon::L2_loadrd_io), DestReg)
961  .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
962  } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
963  BuildMI(MBB, I, DL, get(Hexagon::LDriw_pred), DestReg)
964  .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
965  } else if (Hexagon::ModRegsRegClass.hasSubClassEq(RC)) {
966  BuildMI(MBB, I, DL, get(Hexagon::LDriw_ctr), DestReg)
967  .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
968  } else if (Hexagon::HvxQRRegClass.hasSubClassEq(RC)) {
969  BuildMI(MBB, I, DL, get(Hexagon::PS_vloadrq_ai), DestReg)
970  .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
971  } else if (Hexagon::HvxVRRegClass.hasSubClassEq(RC)) {
972  // If there are variable-sized objects, spills will not be aligned.
973  if (HasAlloca)
974  SlotAlign = HFI.getStackAlignment();
975  unsigned Opc = SlotAlign < RegAlign ? Hexagon::V6_vL32Ub_ai
976  : Hexagon::V6_vL32b_ai;
979  MFI.getObjectSize(FI), SlotAlign);
980  BuildMI(MBB, I, DL, get(Opc), DestReg)
981  .addFrameIndex(FI).addImm(0).addMemOperand(MMOA);
982  } else if (Hexagon::HvxWRRegClass.hasSubClassEq(RC)) {
983  // If there are variable-sized objects, spills will not be aligned.
984  if (HasAlloca)
985  SlotAlign = HFI.getStackAlignment();
986  unsigned Opc = SlotAlign < RegAlign ? Hexagon::PS_vloadrwu_ai
987  : Hexagon::PS_vloadrw_ai;
990  MFI.getObjectSize(FI), SlotAlign);
991  BuildMI(MBB, I, DL, get(Opc), DestReg)
992  .addFrameIndex(FI).addImm(0).addMemOperand(MMOA);
993  } else {
994  llvm_unreachable("Can't store this register to stack slot");
995  }
996 }
997 
998 static void getLiveRegsAt(LivePhysRegs &Regs, const MachineInstr &MI) {
999  const MachineBasicBlock &B = *MI.getParent();
1000  Regs.addLiveOuts(B);
1001  auto E = ++MachineBasicBlock::const_iterator(MI.getIterator()).getReverse();
1002  for (auto I = B.rbegin(); I != E; ++I)
1003  Regs.stepBackward(*I);
1004 }
1005 
1006 /// expandPostRAPseudo - This function is called for all pseudo instructions
1007 /// that remain after register allocation. Many pseudo instructions are
1008 /// created to help register allocation. This is the place to convert them
1009 /// into real instructions. The target can edit MI in place, or it can insert
1010 /// new instructions and erase MI. The function should return true if
1011 /// anything was changed.
1013  MachineBasicBlock &MBB = *MI.getParent();
1014  MachineFunction &MF = *MBB.getParent();
1016  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
1017  DebugLoc DL = MI.getDebugLoc();
1018  unsigned Opc = MI.getOpcode();
1019 
1020  auto RealCirc = [&](unsigned Opc, bool HasImm, unsigned MxOp) {
1021  unsigned Mx = MI.getOperand(MxOp).getReg();
1022  unsigned CSx = (Mx == Hexagon::M0 ? Hexagon::CS0 : Hexagon::CS1);
1023  BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrrcr), CSx)
1024  .add(MI.getOperand((HasImm ? 5 : 4)));
1025  auto MIB = BuildMI(MBB, MI, DL, get(Opc)).add(MI.getOperand(0))
1026  .add(MI.getOperand(1)).add(MI.getOperand(2)).add(MI.getOperand(3));
1027  if (HasImm)
1028  MIB.add(MI.getOperand(4));
1029  MIB.addReg(CSx, RegState::Implicit);
1030  MBB.erase(MI);
1031  return true;
1032  };
1033 
1034  switch (Opc) {
1035  case TargetOpcode::COPY: {
1036  MachineOperand &MD = MI.getOperand(0);
1037  MachineOperand &MS = MI.getOperand(1);
1039  if (MD.getReg() != MS.getReg() && !MS.isUndef()) {
1040  copyPhysReg(MBB, MI, DL, MD.getReg(), MS.getReg(), MS.isKill());
1041  std::prev(MBBI)->copyImplicitOps(*MBB.getParent(), MI);
1042  }
1043  MBB.erase(MBBI);
1044  return true;
1045  }
1046  case Hexagon::PS_aligna:
1047  BuildMI(MBB, MI, DL, get(Hexagon::A2_andir), MI.getOperand(0).getReg())
1048  .addReg(HRI.getFrameRegister())
1049  .addImm(-MI.getOperand(1).getImm());
1050  MBB.erase(MI);
1051  return true;
1052  case Hexagon::V6_vassignp: {
1053  unsigned SrcReg = MI.getOperand(1).getReg();
1054  unsigned DstReg = MI.getOperand(0).getReg();
1055  unsigned Kill = getKillRegState(MI.getOperand(1).isKill());
1056  BuildMI(MBB, MI, DL, get(Hexagon::V6_vcombine), DstReg)
1057  .addReg(HRI.getSubReg(SrcReg, Hexagon::vsub_hi), Kill)
1058  .addReg(HRI.getSubReg(SrcReg, Hexagon::vsub_lo), Kill);
1059  MBB.erase(MI);
1060  return true;
1061  }
1062  case Hexagon::V6_lo: {
1063  unsigned SrcReg = MI.getOperand(1).getReg();
1064  unsigned DstReg = MI.getOperand(0).getReg();
1065  unsigned SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
1066  copyPhysReg(MBB, MI, DL, DstReg, SrcSubLo, MI.getOperand(1).isKill());
1067  MBB.erase(MI);
1068  MRI.clearKillFlags(SrcSubLo);
1069  return true;
1070  }
1071  case Hexagon::V6_hi: {
1072  unsigned SrcReg = MI.getOperand(1).getReg();
1073  unsigned DstReg = MI.getOperand(0).getReg();
1074  unsigned SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
1075  copyPhysReg(MBB, MI, DL, DstReg, SrcSubHi, MI.getOperand(1).isKill());
1076  MBB.erase(MI);
1077  MRI.clearKillFlags(SrcSubHi);
1078  return true;
1079  }
1080  case Hexagon::PS_vstorerw_ai:
1081  case Hexagon::PS_vstorerwu_ai: {
1082  bool Aligned = Opc == Hexagon::PS_vstorerw_ai;
1083  unsigned SrcReg = MI.getOperand(2).getReg();
1084  unsigned SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
1085  unsigned SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
1086  unsigned NewOpc = Aligned ? Hexagon::V6_vS32b_ai : Hexagon::V6_vS32Ub_ai;
1087  unsigned Offset = HRI.getSpillSize(Hexagon::HvxVRRegClass);
1088 
1089  MachineInstr *MI1New = BuildMI(MBB, MI, DL, get(NewOpc))
1090  .add(MI.getOperand(0))
1091  .addImm(MI.getOperand(1).getImm())
1092  .addReg(SrcSubLo)
1093  .cloneMemRefs(MI);
1094  MI1New->getOperand(0).setIsKill(false);
1095  BuildMI(MBB, MI, DL, get(NewOpc))
1096  .add(MI.getOperand(0))
1097  // The Vectors are indexed in multiples of vector size.
1098  .addImm(MI.getOperand(1).getImm() + Offset)
1099  .addReg(SrcSubHi)
1100  .cloneMemRefs(MI);
1101  MBB.erase(MI);
1102  return true;
1103  }
1104  case Hexagon::PS_vloadrw_ai:
1105  case Hexagon::PS_vloadrwu_ai: {
1106  bool Aligned = Opc == Hexagon::PS_vloadrw_ai;
1107  unsigned DstReg = MI.getOperand(0).getReg();
1108  unsigned NewOpc = Aligned ? Hexagon::V6_vL32b_ai : Hexagon::V6_vL32Ub_ai;
1109  unsigned Offset = HRI.getSpillSize(Hexagon::HvxVRRegClass);
1110 
1111  MachineInstr *MI1New = BuildMI(MBB, MI, DL, get(NewOpc),
1112  HRI.getSubReg(DstReg, Hexagon::vsub_lo))
1113  .add(MI.getOperand(1))
1114  .addImm(MI.getOperand(2).getImm())
1115  .cloneMemRefs(MI);
1116  MI1New->getOperand(1).setIsKill(false);
1117  BuildMI(MBB, MI, DL, get(NewOpc), HRI.getSubReg(DstReg, Hexagon::vsub_hi))
1118  .add(MI.getOperand(1))
1119  // The Vectors are indexed in multiples of vector size.
1120  .addImm(MI.getOperand(2).getImm() + Offset)
1121  .cloneMemRefs(MI);
1122  MBB.erase(MI);
1123  return true;
1124  }
1125  case Hexagon::PS_true: {
1126  unsigned Reg = MI.getOperand(0).getReg();
1127  BuildMI(MBB, MI, DL, get(Hexagon::C2_orn), Reg)
1128  .addReg(Reg, RegState::Undef)
1129  .addReg(Reg, RegState::Undef);
1130  MBB.erase(MI);
1131  return true;
1132  }
1133  case Hexagon::PS_false: {
1134  unsigned Reg = MI.getOperand(0).getReg();
1135  BuildMI(MBB, MI, DL, get(Hexagon::C2_andn), Reg)
1136  .addReg(Reg, RegState::Undef)
1137  .addReg(Reg, RegState::Undef);
1138  MBB.erase(MI);
1139  return true;
1140  }
1141  case Hexagon::PS_qtrue: {
1142  BuildMI(MBB, MI, DL, get(Hexagon::V6_veqw), MI.getOperand(0).getReg())
1143  .addReg(Hexagon::V0, RegState::Undef)
1144  .addReg(Hexagon::V0, RegState::Undef);
1145  MBB.erase(MI);
1146  return true;
1147  }
1148  case Hexagon::PS_qfalse: {
1149  BuildMI(MBB, MI, DL, get(Hexagon::V6_vgtw), MI.getOperand(0).getReg())
1150  .addReg(Hexagon::V0, RegState::Undef)
1151  .addReg(Hexagon::V0, RegState::Undef);
1152  MBB.erase(MI);
1153  return true;
1154  }
1155  case Hexagon::PS_vdd0: {
1156  unsigned Vd = MI.getOperand(0).getReg();
1157  BuildMI(MBB, MI, DL, get(Hexagon::V6_vsubw_dv), Vd)
1158  .addReg(Vd, RegState::Undef)
1159  .addReg(Vd, RegState::Undef);
1160  MBB.erase(MI);
1161  return true;
1162  }
1163  case Hexagon::PS_vmulw: {
1164  // Expand a 64-bit vector multiply into 2 32-bit scalar multiplies.
1165  unsigned DstReg = MI.getOperand(0).getReg();
1166  unsigned Src1Reg = MI.getOperand(1).getReg();
1167  unsigned Src2Reg = MI.getOperand(2).getReg();
1168  unsigned Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::isub_hi);
1169  unsigned Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::isub_lo);
1170  unsigned Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::isub_hi);
1171  unsigned Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::isub_lo);
1172  BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_mpyi),
1173  HRI.getSubReg(DstReg, Hexagon::isub_hi))
1174  .addReg(Src1SubHi)
1175  .addReg(Src2SubHi);
1176  BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_mpyi),
1177  HRI.getSubReg(DstReg, Hexagon::isub_lo))
1178  .addReg(Src1SubLo)
1179  .addReg(Src2SubLo);
1180  MBB.erase(MI);
1181  MRI.clearKillFlags(Src1SubHi);
1182  MRI.clearKillFlags(Src1SubLo);
1183  MRI.clearKillFlags(Src2SubHi);
1184  MRI.clearKillFlags(Src2SubLo);
1185  return true;
1186  }
1187  case Hexagon::PS_vmulw_acc: {
1188  // Expand 64-bit vector multiply with addition into 2 scalar multiplies.
1189  unsigned DstReg = MI.getOperand(0).getReg();
1190  unsigned Src1Reg = MI.getOperand(1).getReg();
1191  unsigned Src2Reg = MI.getOperand(2).getReg();
1192  unsigned Src3Reg = MI.getOperand(3).getReg();
1193  unsigned Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::isub_hi);
1194  unsigned Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::isub_lo);
1195  unsigned Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::isub_hi);
1196  unsigned Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::isub_lo);
1197  unsigned Src3SubHi = HRI.getSubReg(Src3Reg, Hexagon::isub_hi);
1198  unsigned Src3SubLo = HRI.getSubReg(Src3Reg, Hexagon::isub_lo);
1199  BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_maci),
1200  HRI.getSubReg(DstReg, Hexagon::isub_hi))
1201  .addReg(Src1SubHi)
1202  .addReg(Src2SubHi)
1203  .addReg(Src3SubHi);
1204  BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_maci),
1205  HRI.getSubReg(DstReg, Hexagon::isub_lo))
1206  .addReg(Src1SubLo)
1207  .addReg(Src2SubLo)
1208  .addReg(Src3SubLo);
1209  MBB.erase(MI);
1210  MRI.clearKillFlags(Src1SubHi);
1211  MRI.clearKillFlags(Src1SubLo);
1212  MRI.clearKillFlags(Src2SubHi);
1213  MRI.clearKillFlags(Src2SubLo);
1214  MRI.clearKillFlags(Src3SubHi);
1215  MRI.clearKillFlags(Src3SubLo);
1216  return true;
1217  }
1218  case Hexagon::PS_pselect: {
1219  const MachineOperand &Op0 = MI.getOperand(0);
1220  const MachineOperand &Op1 = MI.getOperand(1);
1221  const MachineOperand &Op2 = MI.getOperand(2);
1222  const MachineOperand &Op3 = MI.getOperand(3);
1223  unsigned Rd = Op0.getReg();
1224  unsigned Pu = Op1.getReg();
1225  unsigned Rs = Op2.getReg();
1226  unsigned Rt = Op3.getReg();
1227  DebugLoc DL = MI.getDebugLoc();
1228  unsigned K1 = getKillRegState(Op1.isKill());
1229  unsigned K2 = getKillRegState(Op2.isKill());
1230  unsigned K3 = getKillRegState(Op3.isKill());
1231  if (Rd != Rs)
1232  BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrpt), Rd)
1233  .addReg(Pu, (Rd == Rt) ? K1 : 0)
1234  .addReg(Rs, K2);
1235  if (Rd != Rt)
1236  BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrpf), Rd)
1237  .addReg(Pu, K1)
1238  .addReg(Rt, K3);
1239  MBB.erase(MI);
1240  return true;
1241  }
1242  case Hexagon::PS_vselect: {
1243  const MachineOperand &Op0 = MI.getOperand(0);
1244  const MachineOperand &Op1 = MI.getOperand(1);
1245  const MachineOperand &Op2 = MI.getOperand(2);
1246  const MachineOperand &Op3 = MI.getOperand(3);
1247  LivePhysRegs LiveAtMI(HRI);
1248  getLiveRegsAt(LiveAtMI, MI);
1249  bool IsDestLive = !LiveAtMI.available(MRI, Op0.getReg());
1250  unsigned PReg = Op1.getReg();
1251  assert(Op1.getSubReg() == 0);
1252  unsigned PState = getRegState(Op1);
1253 
1254  if (Op0.getReg() != Op2.getReg()) {
1255  unsigned S = Op0.getReg() != Op3.getReg() ? PState & ~RegState::Kill
1256  : PState;
1257  auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vcmov))
1258  .add(Op0)
1259  .addReg(PReg, S)
1260  .add(Op2);
1261  if (IsDestLive)
1262  T.addReg(Op0.getReg(), RegState::Implicit);
1263  IsDestLive = true;
1264  }
1265  if (Op0.getReg() != Op3.getReg()) {
1266  auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vncmov))
1267  .add(Op0)
1268  .addReg(PReg, PState)
1269  .add(Op3);
1270  if (IsDestLive)
1271  T.addReg(Op0.getReg(), RegState::Implicit);
1272  }
1273  MBB.erase(MI);
1274  return true;
1275  }
1276  case Hexagon::PS_wselect: {
1277  MachineOperand &Op0 = MI.getOperand(0);
1278  MachineOperand &Op1 = MI.getOperand(1);
1279  MachineOperand &Op2 = MI.getOperand(2);
1280  MachineOperand &Op3 = MI.getOperand(3);
1281  LivePhysRegs LiveAtMI(HRI);
1282  getLiveRegsAt(LiveAtMI, MI);
1283  bool IsDestLive = !LiveAtMI.available(MRI, Op0.getReg());
1284  unsigned PReg = Op1.getReg();
1285  assert(Op1.getSubReg() == 0);
1286  unsigned PState = getRegState(Op1);
1287 
1288  if (Op0.getReg() != Op2.getReg()) {
1289  unsigned S = Op0.getReg() != Op3.getReg() ? PState & ~RegState::Kill
1290  : PState;
1291  unsigned SrcLo = HRI.getSubReg(Op2.getReg(), Hexagon::vsub_lo);
1292  unsigned SrcHi = HRI.getSubReg(Op2.getReg(), Hexagon::vsub_hi);
1293  auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vccombine))
1294  .add(Op0)
1295  .addReg(PReg, S)
1296  .addReg(SrcHi)
1297  .addReg(SrcLo);
1298  if (IsDestLive)
1299  T.addReg(Op0.getReg(), RegState::Implicit);
1300  IsDestLive = true;
1301  }
1302  if (Op0.getReg() != Op3.getReg()) {
1303  unsigned SrcLo = HRI.getSubReg(Op3.getReg(), Hexagon::vsub_lo);
1304  unsigned SrcHi = HRI.getSubReg(Op3.getReg(), Hexagon::vsub_hi);
1305  auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vnccombine))
1306  .add(Op0)
1307  .addReg(PReg, PState)
1308  .addReg(SrcHi)
1309  .addReg(SrcLo);
1310  if (IsDestLive)
1311  T.addReg(Op0.getReg(), RegState::Implicit);
1312  }
1313  MBB.erase(MI);
1314  return true;
1315  }
1316 
1317  case Hexagon::PS_tailcall_i:
1318  MI.setDesc(get(Hexagon::J2_jump));
1319  return true;
1320  case Hexagon::PS_tailcall_r:
1321  case Hexagon::PS_jmpret:
1322  MI.setDesc(get(Hexagon::J2_jumpr));
1323  return true;
1324  case Hexagon::PS_jmprett:
1325  MI.setDesc(get(Hexagon::J2_jumprt));
1326  return true;
1327  case Hexagon::PS_jmpretf:
1328  MI.setDesc(get(Hexagon::J2_jumprf));
1329  return true;
1330  case Hexagon::PS_jmprettnewpt:
1331  MI.setDesc(get(Hexagon::J2_jumprtnewpt));
1332  return true;
1333  case Hexagon::PS_jmpretfnewpt:
1334  MI.setDesc(get(Hexagon::J2_jumprfnewpt));
1335  return true;
1336  case Hexagon::PS_jmprettnew:
1337  MI.setDesc(get(Hexagon::J2_jumprtnew));
1338  return true;
1339  case Hexagon::PS_jmpretfnew:
1340  MI.setDesc(get(Hexagon::J2_jumprfnew));
1341  return true;
1342 
1343  case Hexagon::PS_loadrub_pci:
1344  return RealCirc(Hexagon::L2_loadrub_pci, /*HasImm*/true, /*MxOp*/4);
1345  case Hexagon::PS_loadrb_pci:
1346  return RealCirc(Hexagon::L2_loadrb_pci, /*HasImm*/true, /*MxOp*/4);
1347  case Hexagon::PS_loadruh_pci:
1348  return RealCirc(Hexagon::L2_loadruh_pci, /*HasImm*/true, /*MxOp*/4);
1349  case Hexagon::PS_loadrh_pci:
1350  return RealCirc(Hexagon::L2_loadrh_pci, /*HasImm*/true, /*MxOp*/4);
1351  case Hexagon::PS_loadri_pci:
1352  return RealCirc(Hexagon::L2_loadri_pci, /*HasImm*/true, /*MxOp*/4);
1353  case Hexagon::PS_loadrd_pci:
1354  return RealCirc(Hexagon::L2_loadrd_pci, /*HasImm*/true, /*MxOp*/4);
1355  case Hexagon::PS_loadrub_pcr:
1356  return RealCirc(Hexagon::L2_loadrub_pcr, /*HasImm*/false, /*MxOp*/3);
1357  case Hexagon::PS_loadrb_pcr:
1358  return RealCirc(Hexagon::L2_loadrb_pcr, /*HasImm*/false, /*MxOp*/3);
1359  case Hexagon::PS_loadruh_pcr:
1360  return RealCirc(Hexagon::L2_loadruh_pcr, /*HasImm*/false, /*MxOp*/3);
1361  case Hexagon::PS_loadrh_pcr:
1362  return RealCirc(Hexagon::L2_loadrh_pcr, /*HasImm*/false, /*MxOp*/3);
1363  case Hexagon::PS_loadri_pcr:
1364  return RealCirc(Hexagon::L2_loadri_pcr, /*HasImm*/false, /*MxOp*/3);
1365  case Hexagon::PS_loadrd_pcr:
1366  return RealCirc(Hexagon::L2_loadrd_pcr, /*HasImm*/false, /*MxOp*/3);
1367  case Hexagon::PS_storerb_pci:
1368  return RealCirc(Hexagon::S2_storerb_pci, /*HasImm*/true, /*MxOp*/3);
1369  case Hexagon::PS_storerh_pci:
1370  return RealCirc(Hexagon::S2_storerh_pci, /*HasImm*/true, /*MxOp*/3);
1371  case Hexagon::PS_storerf_pci:
1372  return RealCirc(Hexagon::S2_storerf_pci, /*HasImm*/true, /*MxOp*/3);
1373  case Hexagon::PS_storeri_pci:
1374  return RealCirc(Hexagon::S2_storeri_pci, /*HasImm*/true, /*MxOp*/3);
1375  case Hexagon::PS_storerd_pci:
1376  return RealCirc(Hexagon::S2_storerd_pci, /*HasImm*/true, /*MxOp*/3);
1377  case Hexagon::PS_storerb_pcr:
1378  return RealCirc(Hexagon::S2_storerb_pcr, /*HasImm*/false, /*MxOp*/2);
1379  case Hexagon::PS_storerh_pcr:
1380  return RealCirc(Hexagon::S2_storerh_pcr, /*HasImm*/false, /*MxOp*/2);
1381  case Hexagon::PS_storerf_pcr:
1382  return RealCirc(Hexagon::S2_storerf_pcr, /*HasImm*/false, /*MxOp*/2);
1383  case Hexagon::PS_storeri_pcr:
1384  return RealCirc(Hexagon::S2_storeri_pcr, /*HasImm*/false, /*MxOp*/2);
1385  case Hexagon::PS_storerd_pcr:
1386  return RealCirc(Hexagon::S2_storerd_pcr, /*HasImm*/false, /*MxOp*/2);
1387  }
1388 
1389  return false;
1390 }
1391 
1394  MachineBasicBlock &MBB = *MI.getParent();
1395  const DebugLoc &DL = MI.getDebugLoc();
1396  unsigned Opc = MI.getOpcode();
1398 
1399  switch (Opc) {
1400  case Hexagon::V6_vgathermh_pseudo:
1401  First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermh))
1402  .add(MI.getOperand(1))
1403  .add(MI.getOperand(2))
1404  .add(MI.getOperand(3));
1405  BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1406  .add(MI.getOperand(0))
1407  .addImm(0)
1408  .addReg(Hexagon::VTMP);
1409  MBB.erase(MI);
1410  return First.getInstrIterator();
1411 
1412  case Hexagon::V6_vgathermw_pseudo:
1413  First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermw))
1414  .add(MI.getOperand(1))
1415  .add(MI.getOperand(2))
1416  .add(MI.getOperand(3));
1417  BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1418  .add(MI.getOperand(0))
1419  .addImm(0)
1420  .addReg(Hexagon::VTMP);
1421  MBB.erase(MI);
1422  return First.getInstrIterator();
1423 
1424  case Hexagon::V6_vgathermhw_pseudo:
1425  First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermhw))
1426  .add(MI.getOperand(1))
1427  .add(MI.getOperand(2))
1428  .add(MI.getOperand(3));
1429  BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1430  .add(MI.getOperand(0))
1431  .addImm(0)
1432  .addReg(Hexagon::VTMP);
1433  MBB.erase(MI);
1434  return First.getInstrIterator();
1435 
1436  case Hexagon::V6_vgathermhq_pseudo:
1437  First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermhq))
1438  .add(MI.getOperand(1))
1439  .add(MI.getOperand(2))
1440  .add(MI.getOperand(3))
1441  .add(MI.getOperand(4));
1442  BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1443  .add(MI.getOperand(0))
1444  .addImm(0)
1445  .addReg(Hexagon::VTMP);
1446  MBB.erase(MI);
1447  return First.getInstrIterator();
1448 
1449  case Hexagon::V6_vgathermwq_pseudo:
1450  First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermwq))
1451  .add(MI.getOperand(1))
1452  .add(MI.getOperand(2))
1453  .add(MI.getOperand(3))
1454  .add(MI.getOperand(4));
1455  BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1456  .add(MI.getOperand(0))
1457  .addImm(0)
1458  .addReg(Hexagon::VTMP);
1459  MBB.erase(MI);
1460  return First.getInstrIterator();
1461 
1462  case Hexagon::V6_vgathermhwq_pseudo:
1463  First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermhwq))
1464  .add(MI.getOperand(1))
1465  .add(MI.getOperand(2))
1466  .add(MI.getOperand(3))
1467  .add(MI.getOperand(4));
1468  BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1469  .add(MI.getOperand(0))
1470  .addImm(0)
1471  .addReg(Hexagon::VTMP);
1472  MBB.erase(MI);
1473  return First.getInstrIterator();
1474  }
1475 
1476  return MI.getIterator();
1477 }
1478 
1479 // We indicate that we want to reverse the branch by
1480 // inserting the reversed branching opcode.
1482  SmallVectorImpl<MachineOperand> &Cond) const {
1483  if (Cond.empty())
1484  return true;
1485  assert(Cond[0].isImm() && "First entry in the cond vector not imm-val");
1486  unsigned opcode = Cond[0].getImm();
1487  //unsigned temp;
1488  assert(get(opcode).isBranch() && "Should be a branching condition.");
1489  if (isEndLoopN(opcode))
1490  return true;
1491  unsigned NewOpcode = getInvertedPredicatedOpcode(opcode);
1492  Cond[0].setImm(NewOpcode);
1493  return false;
1494 }
1495 
1498  DebugLoc DL;
1499  BuildMI(MBB, MI, DL, get(Hexagon::A2_nop));
1500 }
1501 
1503  return getAddrMode(MI) == HexagonII::PostInc;
1504 }
1505 
1506 // Returns true if an instruction is predicated irrespective of the predicate
1507 // sense. For example, all of the following will return true.
1508 // if (p0) R1 = add(R2, R3)
1509 // if (!p0) R1 = add(R2, R3)
1510 // if (p0.new) R1 = add(R2, R3)
1511 // if (!p0.new) R1 = add(R2, R3)
1512 // Note: New-value stores are not included here as in the current
1513 // implementation, we don't need to check their predicate sense.
1515  const uint64_t F = MI.getDesc().TSFlags;
1517 }
1518 
1520  MachineInstr &MI, ArrayRef<MachineOperand> Cond) const {
1521  if (Cond.empty() || isNewValueJump(Cond[0].getImm()) ||
1522  isEndLoopN(Cond[0].getImm())) {
1523  LLVM_DEBUG(dbgs() << "\nCannot predicate:"; MI.dump(););
1524  return false;
1525  }
1526  int Opc = MI.getOpcode();
1527  assert (isPredicable(MI) && "Expected predicable instruction");
1528  bool invertJump = predOpcodeHasNot(Cond);
1529 
1530  // We have to predicate MI "in place", i.e. after this function returns,
1531  // MI will need to be transformed into a predicated form. To avoid com-
1532  // plicated manipulations with the operands (handling tied operands,
1533  // etc.), build a new temporary instruction, then overwrite MI with it.
1534 
1535  MachineBasicBlock &B = *MI.getParent();
1536  DebugLoc DL = MI.getDebugLoc();
1537  unsigned PredOpc = getCondOpcode(Opc, invertJump);
1538  MachineInstrBuilder T = BuildMI(B, MI, DL, get(PredOpc));
1539  unsigned NOp = 0, NumOps = MI.getNumOperands();
1540  while (NOp < NumOps) {
1541  MachineOperand &Op = MI.getOperand(NOp);
1542  if (!Op.isReg() || !Op.isDef() || Op.isImplicit())
1543  break;
1544  T.add(Op);
1545  NOp++;
1546  }
1547 
1548  unsigned PredReg, PredRegPos, PredRegFlags;
1549  bool GotPredReg = getPredReg(Cond, PredReg, PredRegPos, PredRegFlags);
1550  (void)GotPredReg;
1551  assert(GotPredReg);
1552  T.addReg(PredReg, PredRegFlags);
1553  while (NOp < NumOps)
1554  T.add(MI.getOperand(NOp++));
1555 
1556  MI.setDesc(get(PredOpc));
1557  while (unsigned n = MI.getNumOperands())
1558  MI.RemoveOperand(n-1);
1559  for (unsigned i = 0, n = T->getNumOperands(); i < n; ++i)
1560  MI.addOperand(T->getOperand(i));
1561 
1563  B.erase(TI);
1564 
1566  MRI.clearKillFlags(PredReg);
1567  return true;
1568 }
1569 
1571  ArrayRef<MachineOperand> Pred2) const {
1572  // TODO: Fix this
1573  return false;
1574 }
1575 
1577  std::vector<MachineOperand> &Pred) const {
1578  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
1579 
1580  for (unsigned oper = 0; oper < MI.getNumOperands(); ++oper) {
1581  MachineOperand MO = MI.getOperand(oper);
1582  if (MO.isReg()) {
1583  if (!MO.isDef())
1584  continue;
1585  const TargetRegisterClass* RC = HRI.getMinimalPhysRegClass(MO.getReg());
1586  if (RC == &Hexagon::PredRegsRegClass) {
1587  Pred.push_back(MO);
1588  return true;
1589  }
1590  continue;
1591  } else if (MO.isRegMask()) {
1592  for (unsigned PR : Hexagon::PredRegsRegClass) {
1593  if (!MI.modifiesRegister(PR, &HRI))
1594  continue;
1595  Pred.push_back(MO);
1596  return true;
1597  }
1598  }
1599  }
1600  return false;
1601 }
1602 
1604  if (!MI.getDesc().isPredicable())
1605  return false;
1606 
1607  if (MI.isCall() || isTailCall(MI)) {
1608  if (!Subtarget.usePredicatedCalls())
1609  return false;
1610  }
1611 
1612  // HVX loads are not predicable on v60, but are on v62.
1613  if (!Subtarget.hasV62Ops()) {
1614  switch (MI.getOpcode()) {
1615  case Hexagon::V6_vL32b_ai:
1616  case Hexagon::V6_vL32b_pi:
1617  case Hexagon::V6_vL32b_ppu:
1618  case Hexagon::V6_vL32b_cur_ai:
1619  case Hexagon::V6_vL32b_cur_pi:
1620  case Hexagon::V6_vL32b_cur_ppu:
1621  case Hexagon::V6_vL32b_nt_ai:
1622  case Hexagon::V6_vL32b_nt_pi:
1623  case Hexagon::V6_vL32b_nt_ppu:
1624  case Hexagon::V6_vL32b_tmp_ai:
1625  case Hexagon::V6_vL32b_tmp_pi:
1626  case Hexagon::V6_vL32b_tmp_ppu:
1627  case Hexagon::V6_vL32b_nt_cur_ai:
1628  case Hexagon::V6_vL32b_nt_cur_pi:
1629  case Hexagon::V6_vL32b_nt_cur_ppu:
1630  case Hexagon::V6_vL32b_nt_tmp_ai:
1631  case Hexagon::V6_vL32b_nt_tmp_pi:
1632  case Hexagon::V6_vL32b_nt_tmp_ppu:
1633  return false;
1634  }
1635  }
1636  return true;
1637 }
1638 
1640  const MachineBasicBlock *MBB,
1641  const MachineFunction &MF) const {
1642  // Debug info is never a scheduling boundary. It's necessary to be explicit
1643  // due to the special treatment of IT instructions below, otherwise a
1644  // dbg_value followed by an IT will result in the IT instruction being
1645  // considered a scheduling hazard, which is wrong. It should be the actual
1646  // instruction preceding the dbg_value instruction(s), just like it is
1647  // when debug info is not present.
1648  if (MI.isDebugInstr())
1649  return false;
1650 
1651  // Throwing call is a boundary.
1652  if (MI.isCall()) {
1653  // Don't mess around with no return calls.
1654  if (doesNotReturn(MI))
1655  return true;
1656  // If any of the block's successors is a landing pad, this could be a
1657  // throwing call.
1658  for (auto I : MBB->successors())
1659  if (I->isEHPad())
1660  return true;
1661  }
1662 
1663  // Terminators and labels can't be scheduled around.
1664  if (MI.getDesc().isTerminator() || MI.isPosition())
1665  return true;
1666 
1667  if (MI.isInlineAsm() && !ScheduleInlineAsm)
1668  return true;
1669 
1670  return false;
1671 }
1672 
1673 /// Measure the specified inline asm to determine an approximation of its
1674 /// length.
1675 /// Comments (which run till the next SeparatorString or newline) do not
1676 /// count as an instruction.
1677 /// Any other non-whitespace text is considered an instruction, with
1678 /// multiple instructions separated by SeparatorString or newlines.
1679 /// Variable-length instructions are not handled here; this function
1680 /// may be overloaded in the target code to do that.
 1681 /// Hexagon counts the number of ##'s and adjusts for that many
 1682 /// constant extenders.
1683 unsigned HexagonInstrInfo::getInlineAsmLength(const char *Str,
1684  const MCAsmInfo &MAI) const {
1685  StringRef AStr(Str);
1686  // Count the number of instructions in the asm.
1687  bool atInsnStart = true;
1688  unsigned Length = 0;
1689  for (; *Str; ++Str) {
1690  if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
1691  strlen(MAI.getSeparatorString())) == 0)
1692  atInsnStart = true;
1693  if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
1694  Length += MAI.getMaxInstLength();
1695  atInsnStart = false;
1696  }
1697  if (atInsnStart && strncmp(Str, MAI.getCommentString().data(),
1698  MAI.getCommentString().size()) == 0)
1699  atInsnStart = false;
1700  }
1701 
1702  // Add to size number of constant extenders seen * 4.
1703  StringRef Occ("##");
1704  Length += AStr.count(Occ)*4;
1705  return Length;
1706 }
1707 
1710  const InstrItineraryData *II, const ScheduleDAG *DAG) const {
1711  if (UseDFAHazardRec)
1712  return new HexagonHazardRecognizer(II, this, Subtarget);
1714 }
1715 
1716 /// For a comparison instruction, return the source registers in
1717 /// \p SrcReg and \p SrcReg2 if having two register operands, and the value it
1718 /// compares against in CmpValue. Return true if the comparison instruction
1719 /// can be analyzed.
1720 bool HexagonInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
1721  unsigned &SrcReg2, int &Mask,
1722  int &Value) const {
1723  unsigned Opc = MI.getOpcode();
1724 
1725  // Set mask and the first source register.
1726  switch (Opc) {
1727  case Hexagon::C2_cmpeq:
1728  case Hexagon::C2_cmpeqp:
1729  case Hexagon::C2_cmpgt:
1730  case Hexagon::C2_cmpgtp:
1731  case Hexagon::C2_cmpgtu:
1732  case Hexagon::C2_cmpgtup:
1733  case Hexagon::C4_cmpneq:
1734  case Hexagon::C4_cmplte:
1735  case Hexagon::C4_cmplteu:
1736  case Hexagon::C2_cmpeqi:
1737  case Hexagon::C2_cmpgti:
1738  case Hexagon::C2_cmpgtui:
1739  case Hexagon::C4_cmpneqi:
1740  case Hexagon::C4_cmplteui:
1741  case Hexagon::C4_cmpltei:
1742  SrcReg = MI.getOperand(1).getReg();
1743  Mask = ~0;
1744  break;
1745  case Hexagon::A4_cmpbeq:
1746  case Hexagon::A4_cmpbgt:
1747  case Hexagon::A4_cmpbgtu:
1748  case Hexagon::A4_cmpbeqi:
1749  case Hexagon::A4_cmpbgti:
1750  case Hexagon::A4_cmpbgtui:
1751  SrcReg = MI.getOperand(1).getReg();
1752  Mask = 0xFF;
1753  break;
1754  case Hexagon::A4_cmpheq:
1755  case Hexagon::A4_cmphgt:
1756  case Hexagon::A4_cmphgtu:
1757  case Hexagon::A4_cmpheqi:
1758  case Hexagon::A4_cmphgti:
1759  case Hexagon::A4_cmphgtui:
1760  SrcReg = MI.getOperand(1).getReg();
1761  Mask = 0xFFFF;
1762  break;
1763  }
1764 
1765  // Set the value/second source register.
1766  switch (Opc) {
1767  case Hexagon::C2_cmpeq:
1768  case Hexagon::C2_cmpeqp:
1769  case Hexagon::C2_cmpgt:
1770  case Hexagon::C2_cmpgtp:
1771  case Hexagon::C2_cmpgtu:
1772  case Hexagon::C2_cmpgtup:
1773  case Hexagon::A4_cmpbeq:
1774  case Hexagon::A4_cmpbgt:
1775  case Hexagon::A4_cmpbgtu:
1776  case Hexagon::A4_cmpheq:
1777  case Hexagon::A4_cmphgt:
1778  case Hexagon::A4_cmphgtu:
1779  case Hexagon::C4_cmpneq:
1780  case Hexagon::C4_cmplte:
1781  case Hexagon::C4_cmplteu:
1782  SrcReg2 = MI.getOperand(2).getReg();
1783  return true;
1784 
1785  case Hexagon::C2_cmpeqi:
1786  case Hexagon::C2_cmpgtui:
1787  case Hexagon::C2_cmpgti:
1788  case Hexagon::C4_cmpneqi:
1789  case Hexagon::C4_cmplteui:
1790  case Hexagon::C4_cmpltei:
1791  case Hexagon::A4_cmpbeqi:
1792  case Hexagon::A4_cmpbgti:
1793  case Hexagon::A4_cmpbgtui:
1794  case Hexagon::A4_cmpheqi:
1795  case Hexagon::A4_cmphgti:
1796  case Hexagon::A4_cmphgtui: {
1797  SrcReg2 = 0;
1798  const MachineOperand &Op2 = MI.getOperand(2);
1799  if (!Op2.isImm())
1800  return false;
1801  Value = MI.getOperand(2).getImm();
1802  return true;
1803  }
1804  }
1805 
1806  return false;
1807 }
1808 
1810  const MachineInstr &MI,
1811  unsigned *PredCost) const {
1812  return getInstrTimingClassLatency(ItinData, MI);
1813 }
1814 
1816  const TargetSubtargetInfo &STI) const {
1817  const InstrItineraryData *II = STI.getInstrItineraryData();
1818  return static_cast<const HexagonSubtarget&>(STI).createDFAPacketizer(II);
1819 }
1820 
1821 // Inspired by this pair:
1822 // %r13 = L2_loadri_io %r29, 136; mem:LD4[FixedStack0]
1823 // S2_storeri_io %r29, 132, killed %r1; flags: mem:ST4[FixedStack1]
1824 // Currently AA considers the addresses in these instructions to be aliasing.
// Returns true when the two accesses provably do not overlap: same base
// register/subregister, immediate offsets, and the offset gap at least covers
// the lower access's size.
// NOTE(review): the function signature line (orig. 1825) and part of the
// side-effect guard condition (orig. 1828) were lost in extraction.
1826  MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA) const {
1827  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
1829  return false;
1830 
1831  // Instructions that are pure loads, not loads and stores like memops are not
1832  // dependent.
1833  if (MIa.mayLoad() && !isMemOp(MIa) && MIb.mayLoad() && !isMemOp(MIb))
1834  return true;
1835 
1836  // Get the base register in MIa.
1837  unsigned BasePosA, OffsetPosA;
1838  if (!getBaseAndOffsetPosition(MIa, BasePosA, OffsetPosA))
1839  return false;
1840  const MachineOperand &BaseA = MIa.getOperand(BasePosA);
1841  unsigned BaseRegA = BaseA.getReg();
1842  unsigned BaseSubA = BaseA.getSubReg();
1843 
1844  // Get the base register in MIb.
1845  unsigned BasePosB, OffsetPosB;
1846  if (!getBaseAndOffsetPosition(MIb, BasePosB, OffsetPosB))
1847  return false;
1848  const MachineOperand &BaseB = MIb.getOperand(BasePosB);
1849  unsigned BaseRegB = BaseB.getReg();
1850  unsigned BaseSubB = BaseB.getSubReg();
1851 
// Different bases: cannot reason about disjointness here, answer conservatively.
1852  if (BaseRegA != BaseRegB || BaseSubA != BaseSubB)
1853  return false;
1854 
1855  // Get the access sizes.
1856  unsigned SizeA = getMemAccessSize(MIa);
1857  unsigned SizeB = getMemAccessSize(MIb);
1858 
1859  // Get the offsets. Handle immediates only for now.
1860  const MachineOperand &OffA = MIa.getOperand(OffsetPosA);
1861  const MachineOperand &OffB = MIb.getOperand(OffsetPosB);
1862  if (!MIa.getOperand(OffsetPosA).isImm() ||
1863  !MIb.getOperand(OffsetPosB).isImm())
1864  return false;
// Post-increment accesses address the base itself; treat the offset as 0.
1865  int OffsetA = isPostIncrement(MIa) ? 0 : OffA.getImm();
1866  int OffsetB = isPostIncrement(MIb) ? 0 : OffB.getImm();
1867 
1868  // This is a mem access with the same base register and known offsets from it.
1869  // Reason about it.
// Widening via int64_t before subtracting avoids signed overflow on the
// difference of two extreme int offsets.
1870  if (OffsetA > OffsetB) {
1871  uint64_t OffDiff = (uint64_t)((int64_t)OffsetA - (int64_t)OffsetB);
1872  return SizeB <= OffDiff;
1873  }
1874  if (OffsetA < OffsetB) {
1875  uint64_t OffDiff = (uint64_t)((int64_t)OffsetB - (int64_t)OffsetA);
1876  return SizeA <= OffDiff;
1877  }
1878 
// Equal offsets: the accesses overlap.
1879  return false;
1880 }
1881 
1882 /// If the instruction is an increment of a constant value, return the amount.
// Handles two shapes: post-increment memory ops (increment is the offset
// operand) and A2_addi (increment is operand 2). Returns false otherwise.
// NOTE(review): the signature line (orig. 1883) was lost in extraction.
1884  int &Value) const {
1885  if (isPostIncrement(MI)) {
1886  unsigned BasePos = 0, OffsetPos = 0;
1887  if (!getBaseAndOffsetPosition(MI, BasePos, OffsetPos))
1888  return false;
1889  const MachineOperand &OffsetOp = MI.getOperand(OffsetPos);
1890  if (OffsetOp.isImm()) {
1891  Value = OffsetOp.getImm();
1892  return true;
1893  }
1894  } else if (MI.getOpcode() == Hexagon::A2_addi) {
1895  const MachineOperand &AddOp = MI.getOperand(2);
1896  if (AddOp.isImm()) {
1897  Value = AddOp.getImm();
1898  return true;
1899  }
1900  }
1901 
1902  return false;
1903 }
1904 
1905 std::pair<unsigned, unsigned>
// Splits a target-flag word into (direct flags, bitmask flags) using the
// HexagonII::MO_Bitmasks partition.
// NOTE(review): the function name line (orig. 1906) was lost in extraction.
1907  return std::make_pair(TF & ~HexagonII::MO_Bitmasks,
1908  TF & HexagonII::MO_Bitmasks);
1909 }
1910 
// Table of direct machine-operand target flags and their MIR serialization
// names. NOTE(review): signature lines (orig. 1911-1912) lost in extraction.
1913  using namespace HexagonII;
1914 
1915  static const std::pair<unsigned, const char*> Flags[] = {
1916  {MO_PCREL, "hexagon-pcrel"},
1917  {MO_GOT, "hexagon-got"},
1918  {MO_LO16, "hexagon-lo16"},
1919  {MO_HI16, "hexagon-hi16"},
1920  {MO_GPREL, "hexagon-gprel"},
1921  {MO_GDGOT, "hexagon-gdgot"},
1922  {MO_GDPLT, "hexagon-gdplt"},
1923  {MO_IE, "hexagon-ie"},
1924  {MO_IEGOT, "hexagon-iegot"},
1925  {MO_TPREL, "hexagon-tprel"}
1926  };
1927  return makeArrayRef(Flags);
1928 }
1929 
// Table of bitmask machine-operand target flags (currently only the
// constant-extender marker) and their MIR serialization names.
1932  using namespace HexagonII;
1933 
1934  static const std::pair<unsigned, const char*> Flags[] = {
1935  {HMOTF_ConstExtended, "hexagon-ext"}
1936  };
1937  return makeArrayRef(Flags);
1938 }
1939 
// Creates a fresh virtual register of the class matching VT: i1 -> predicate,
// i32/f32 -> 32-bit int regs, i64/f64 -> 64-bit double regs.
// NOTE(review): the signature lines (orig. 1940-1941) were lost in extraction.
1942  const TargetRegisterClass *TRC;
1943  if (VT == MVT::i1) {
1944  TRC = &Hexagon::PredRegsRegClass;
1945  } else if (VT == MVT::i32 || VT == MVT::f32) {
1946  TRC = &Hexagon::IntRegsRegClass;
1947  } else if (VT == MVT::i64 || VT == MVT::f64) {
1948  TRC = &Hexagon::DoubleRegsRegClass;
1949  } else {
1950  llvm_unreachable("Cannot handle this register class");
1951  }
1952 
1953  unsigned NewReg = MRI.createVirtualRegister(TRC);
1954  return NewReg;
1955 }
1956 
// Addressing-mode / instruction-kind predicates. NOTE(review): each function's
// signature line (and some return lines) were lost in extraction; surviving
// tokens are verbatim.
// True when MI uses the absolute-set addressing mode.
1958  return (getAddrMode(MI) == HexagonII::AbsoluteSet);
1959 }
1960 
// Accumulator predicate: reads the accumulator bit from the opcode's TSFlags.
// NOTE(review): the return line (orig. 1963) was lost in extraction.
1962  const uint64_t F = MI.getDesc().TSFlags;
1964 }
1965 
// True when MI uses base+immediate-offset addressing.
1967  return getAddrMode(MI) == HexagonII::BaseImmOffset;
1968 }
1969 
// "Complex" here means: not TC1/TC2-early, not a memory access, not frame
// setup/teardown, not a memop, and not any form of control transfer.
1971  return !isTC1(MI) && !isTC2Early(MI) && !MI.getDesc().mayLoad() &&
1972  !MI.getDesc().mayStore() &&
1973  MI.getDesc().getOpcode() != Hexagon::S2_allocframe &&
1974  MI.getDesc().getOpcode() != Hexagon::L2_deallocframe &&
1975  !isMemOp(MI) && !MI.isBranch() && !MI.isReturn() && !MI.isCall();
1976 }
1977 
1978 // Return true if the instruction is a compund branch instruction.
1980  return getType(MI) == HexagonII::TypeCJ && MI.isBranch();
1981 }
1982 
1983 // TODO: In order to have isExtendable for fpimm/f32Ext, we need to handle
1984 // isFPImm and later getFPImm as well.
// Decides whether MI needs a constant-extender word: either the opcode is
// permanently extended, or its extendable operand cannot be encoded inline.
// NOTE(review): the signature (orig. 1985) and the flag-extraction lines
// (orig. 1987, 1992, 2003) were lost in extraction.
1986  const uint64_t F = MI.getDesc().TSFlags;
1988  if (isExtended) // Instruction must be extended.
1989  return true;
1990 
1991  unsigned isExtendable =
1993  if (!isExtendable)
1994  return false;
1995 
1996  if (MI.isCall())
1997  return false;
1998 
1999  short ExtOpNum = getCExtOpNum(MI);
2000  const MachineOperand &MO = MI.getOperand(ExtOpNum);
2001  // Use MO operand flags to determine if MO
2002  // has the HMOTF_ConstExtended flag set.
2004  return true;
2005  // If this is a Machine BB address we are talking about, and it is
2006  // not marked as extended, say so.
2007  if (MO.isMBB())
2008  return false;
2009 
2010  // We could be using an instruction with an extendable immediate and shoehorn
2011  // a global address into it. If it is a global address it will be constant
2012  // extended. We do this for COMBINE.
2013  if (MO.isGlobal() || MO.isSymbol() || MO.isBlockAddress() ||
2014  MO.isJTI() || MO.isCPI() || MO.isFPImm())
2015  return true;
2016 
2017  // If the extendable operand is not 'Immediate' type, the instruction should
2018  // have 'isExtended' flag set.
2019  assert(MO.isImm() && "Extendable operand must be Immediate type");
2020 
// Extend only when the immediate falls outside the inline-encodable range.
2021  int MinValue = getMinValue(MI);
2022  int MaxValue = getMaxValue(MI);
2023  int ImmValue = MO.getImm();
2024 
2025  return (ImmValue < MinValue || ImmValue > MaxValue);
2026 }
2027 
// True for the dealloc-return opcodes (plain, predicated, and .new forms).
// NOTE(review): the signature line (orig. 2028) was lost in extraction.
2029  switch (MI.getOpcode()) {
2030  case Hexagon::L4_return:
2031  case Hexagon::L4_return_t:
2032  case Hexagon::L4_return_f:
2033  case Hexagon::L4_return_tnew_pnt:
2034  case Hexagon::L4_return_fnew_pnt:
2035  case Hexagon::L4_return_tnew_pt:
2036  case Hexagon::L4_return_fnew_pt:
2037  return true;
2038  }
2039  return false;
2040 }
2041 
2042 // Return true when ConsMI uses a register defined by ProdMI.
// Checks exact register matches as well as sub-register overlap in either
// direction. NOTE(review): the signature (orig. 2043), the SmallVector
// declarations (orig. 2049-2052), and two comment lines (orig. 2063, 2068)
// were lost in extraction.
2044  const MachineInstr &ConsMI) const {
2045  if (!ProdMI.getDesc().getNumDefs())
2046  return false;
2047  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
2048 
2053 
2054  parseOperands(ProdMI, DefsA, UsesA);
2055  parseOperands(ConsMI, DefsB, UsesB);
2056 
2057  for (auto &RegA : DefsA)
2058  for (auto &RegB : UsesB) {
2059  // True data dependency.
2060  if (RegA == RegB)
2061  return true;
2062 
// Also match when the used register is a sub-register of the def...
2064  for (MCSubRegIterator SubRegs(RegA, &HRI); SubRegs.isValid(); ++SubRegs)
2065  if (RegB == *SubRegs)
2066  return true;
2067 
// ...or the def is a sub-register of the use.
2069  for (MCSubRegIterator SubRegs(RegB, &HRI); SubRegs.isValid(); ++SubRegs)
2070  if (RegA == *SubRegs)
2071  return true;
2072  }
2073 
2074  return false;
2075 }
2076 
2077 // Returns true if the instruction is alread a .cur.
// NOTE(review): signature lines for the three functions below were lost in
// extraction; surviving tokens are verbatim.
2079  switch (MI.getOpcode()) {
2080  case Hexagon::V6_vL32b_cur_pi:
2081  case Hexagon::V6_vL32b_cur_ai:
2082  return true;
2083  }
2084  return false;
2085 }
2086 
2087 // Returns true, if any one of the operands is a dot new
2088 // insn, whether it is predicated dot new or register dot new.
2090  if (isNewValueInst(MI) || (isPredicated(MI) && isPredicatedNew(MI)))
2091  return true;
2092 
2093  return false;
2094 }
2095 
2096 /// Symmetrical. See if these two instructions are fit for duplex pair.
// Maps each instruction to its duplex group and tries both orderings.
// NOTE(review): the group-computation lines (orig. 2099-2100) were lost.
2098  const MachineInstr &MIb) const {
2101  return (isDuplexPairMatch(MIaG, MIbG) || isDuplexPairMatch(MIbG, MIaG));
2102 }
2103 
// An "early source" instruction needs its operands early in the cycle:
// memory accesses, compares, and TC3x/TC4x (multiply-class) instructions.
// NOTE(review): the signature line (orig. 2104) was lost in extraction.
2105  if (MI.mayLoad() || MI.mayStore() || MI.isCompare())
2106  return true;
2107 
2108  // Multiply
2109  unsigned SchedClass = MI.getDesc().getSchedClass();
2110  return is_TC4x(SchedClass) || is_TC3x(SchedClass);
2111 }
2112 
2113 bool HexagonInstrInfo::isEndLoopN(unsigned Opcode) const {
2114  return (Opcode == Hexagon::ENDLOOP0 ||
2115  Opcode == Hexagon::ENDLOOP1);
2116 }
2117 
// True when the operand type is one that is lowered to an MCExpr.
// NOTE(review): the list of MachineOperand case labels (orig. 2120-2125)
// was lost in extraction.
2118 bool HexagonInstrInfo::isExpr(unsigned OpType) const {
2119  switch(OpType) {
2126  return true;
2127  default:
2128  return false;
2129  }
2130 }
2131 
// Extendability predicate: checks the extendable bit in TSFlags first, then
// a legacy opcode list. NOTE(review): the signature (orig. 2132) and the
// flag-test return line (orig. 2135) were lost in extraction.
2133  const MCInstrDesc &MID = MI.getDesc();
2134  const uint64_t F = MID.TSFlags;
2136  return true;
2137 
2138  // TODO: This is largely obsolete now. Will need to be removed
2139  // in consecutive patches.
2140  switch (MI.getOpcode()) {
2141  // PS_fi and PS_fia remain special cases.
2142  case Hexagon::PS_fi:
2143  case Hexagon::PS_fia:
2144  return true;
2145  default:
2146  return false;
2147  }
// Unreachable: both switch paths above return.
2148  return false;
2149 }
2150 
2151 // This returns true in two cases:
2152 // - The OP code itself indicates that this is an extended instruction.
2153 // - One of MOs has been marked with HMOTF_ConstExtended flag.
// NOTE(review): the signature (orig. 2154) and the TSFlags-test return line
// (orig. 2157) were lost in extraction.
2155  // First check if this is permanently extended op code.
2156  const uint64_t F = MI.getDesc().TSFlags;
2158  return true;
2159  // Use MO operand flags to determine if one of MI's operands
2160  // has HMOTF_ConstExtended flag set.
2161  for (const MachineOperand &MO : MI.operands())
2162  if (MO.getTargetFlags() & HexagonII::HMOTF_ConstExtended)
2163  return true;
2164  return false;
2165 }
2166 
// Extracts the floating-point bit from the opcode's TSFlags.
// NOTE(review): the signature line (orig. 2167) was lost in extraction.
2168  unsigned Opcode = MI.getOpcode();
2169  const uint64_t F = get(Opcode).TSFlags;
2170  return (F >> HexagonII::FPPos) & HexagonII::FPMask;
2171 }
2172 
2173 // No V60 HVX VMEM with A_INDIRECT.
// True when I is an HVX vector memory access and J is an indirect control
// transfer — a pairing the V60 forbids in one packet.
// NOTE(review): signature lines for the three functions below were lost in
// extraction; surviving tokens are verbatim.
2175  const MachineInstr &J) const {
2176  if (!isHVXVec(I))
2177  return false;
2178  if (!I.mayLoad() && !I.mayStore())
2179  return false;
2180  return J.isIndirectBranch() || isIndirectCall(J) || isIndirectL4Return(J);
2181 }
2182 
// Register-indirect call opcodes (plain, predicated, and no-return pseudo).
2184  switch (MI.getOpcode()) {
2185  case Hexagon::J2_callr:
2186  case Hexagon::J2_callrf:
2187  case Hexagon::J2_callrt:
2188  case Hexagon::PS_call_nr:
2189  return true;
2190  }
2191  return false;
2192 }
2193 
// Dealloc-return opcodes treated as indirect returns.
2195  switch (MI.getOpcode()) {
2196  case Hexagon::L4_return:
2197  case Hexagon::L4_return_t:
2198  case Hexagon::L4_return_f:
2199  case Hexagon::L4_return_fnew_pnt:
2200  case Hexagon::L4_return_fnew_pt:
2201  case Hexagon::L4_return_tnew_pnt:
2202  case Hexagon::L4_return_tnew_pt:
2203  return true;
2204  }
2205  return false;
2206 }
2207 
// Register-indirect jump opcodes (plain, predicated, and .new forms).
2209  switch (MI.getOpcode()) {
2210  case Hexagon::J2_jumpr:
2211  case Hexagon::J2_jumprt:
2212  case Hexagon::J2_jumprf:
2213  case Hexagon::J2_jumprtnewpt:
2214  case Hexagon::J2_jumprfnewpt:
2215  case Hexagon::J2_jumprtnew:
2216  case Hexagon::J2_jumprfnew:
2217  return true;
2218  }
2219  return false;
2220 }
2221 
2222 // Return true if a given MI can accommodate given offset.
2223 // Use abs estimate as oppose to the exact number.
2224 // TODO: This will need to be changed to use MC level
2225 // definition of instruction extendable field size.
// NOTE(review): the signature line (orig. 2226) was lost in extraction.
2227  unsigned offset) const {
2228  // This selection of jump instructions matches to that what
2229  // analyzeBranch can parse, plus NVJ.
2230  if (isNewValueJump(MI)) // r9:2
2231  return isInt<11>(offset);
2232 
2233  switch (MI.getOpcode()) {
2234  // Still missing Jump to address condition on register value.
2235  default:
2236  return false;
2237  case Hexagon::J2_jump: // bits<24> dst; // r22:2
2238  case Hexagon::J2_call:
2239  case Hexagon::PS_call_nr:
2240  return isInt<24>(offset);
2241  case Hexagon::J2_jumpt: //bits<17> dst; // r15:2
2242  case Hexagon::J2_jumpf:
2243  case Hexagon::J2_jumptnew:
2244  case Hexagon::J2_jumptnewpt:
2245  case Hexagon::J2_jumpfnew:
2246  case Hexagon::J2_jumpfnewpt:
2247  case Hexagon::J2_callt:
2248  case Hexagon::J2_callf:
2249  return isInt<17>(offset);
2250  case Hexagon::J2_loop0i:
2251  case Hexagon::J2_loop0iext:
2252  case Hexagon::J2_loop0r:
2253  case Hexagon::J2_loop0rext:
2254  case Hexagon::J2_loop1i:
2255  case Hexagon::J2_loop1iext:
2256  case Hexagon::J2_loop1r:
2257  case Hexagon::J2_loop1rext:
2258  return isInt<9>(offset);
2259  // TODO: Add all the compound branches here. Can we do this in Relation model?
2260  case Hexagon::J4_cmpeqi_tp0_jump_nt:
2261  case Hexagon::J4_cmpeqi_tp1_jump_nt:
2262  case Hexagon::J4_cmpeqn1_tp0_jump_nt:
2263  case Hexagon::J4_cmpeqn1_tp1_jump_nt:
2264  return isInt<11>(offset);
2265  }
2266 }
2267 
// True when LRMI produces its result late and ESMI needs it early — a pairing
// that should be kept apart by the scheduler.
// NOTE(review): signature lines for the three functions below were lost in
// extraction; surviving tokens are verbatim.
2269  const MachineInstr &ESMI) const {
2270  bool isLate = isLateResultInstr(LRMI);
2271  bool isEarly = isEarlySourceInstr(ESMI);
2272 
2273  LLVM_DEBUG(dbgs() << "V60" << (isLate ? "-LR " : " -- "));
2274  LLVM_DEBUG(LRMI.dump());
2275  LLVM_DEBUG(dbgs() << "V60" << (isEarly ? "-ES " : " -- "));
2276  LLVM_DEBUG(ESMI.dump());
2277 
2278  if (isLate && isEarly) {
2279  LLVM_DEBUG(dbgs() << "++Is Late Result feeding Early Source\n");
2280  return true;
2281  }
2282 
2283  return false;
2284 }
2285 
// A "late result" instruction is anything outside timing class TC1, with
// register-shuffling pseudos exempted. NOTE(review): one case label
// (orig. 2294) was lost in extraction.
2287  switch (MI.getOpcode()) {
2288  case TargetOpcode::EXTRACT_SUBREG:
2289  case TargetOpcode::INSERT_SUBREG:
2290  case TargetOpcode::SUBREG_TO_REG:
2291  case TargetOpcode::REG_SEQUENCE:
2292  case TargetOpcode::IMPLICIT_DEF:
2293  case TargetOpcode::COPY:
2295  case TargetOpcode::PHI:
2296  return false;
2297  default:
2298  break;
2299  }
2300 
2301  unsigned SchedClass = MI.getDesc().getSchedClass();
2302  return !is_TC1(SchedClass);
2303 }
2304 
2306  // Instructions with iclass A_CVI_VX and attribute A_CVI_LATE uses a multiply
2307  // resource, but all operands can be received late like an ALU instruction.
2308  return getType(MI) == HexagonII::TypeCVI_VX_LATE;
2309 }
2310 
// True for any of the eight hardware-loop setup opcodes (loop0/loop1,
// immediate/register, plain/extended).
// NOTE(review): the signature line (orig. 2311) was lost in extraction.
2312  unsigned Opcode = MI.getOpcode();
2313  return Opcode == Hexagon::J2_loop0i ||
2314  Opcode == Hexagon::J2_loop0r ||
2315  Opcode == Hexagon::J2_loop0iext ||
2316  Opcode == Hexagon::J2_loop0rext ||
2317  Opcode == Hexagon::J2_loop1i ||
2318  Opcode == Hexagon::J2_loop1r ||
2319  Opcode == Hexagon::J2_loop1iext ||
2320  Opcode == Hexagon::J2_loop1rext;
2321 }
2322 
// True for read-modify-write memory-op opcodes (add/sub/and/or and their
// immediate forms, at byte/half/word widths).
// NOTE(review): the signature line (orig. 2323) was lost in extraction.
2324  switch (MI.getOpcode()) {
2325  default: return false;
2326  case Hexagon::L4_iadd_memopw_io:
2327  case Hexagon::L4_isub_memopw_io:
2328  case Hexagon::L4_add_memopw_io:
2329  case Hexagon::L4_sub_memopw_io:
2330  case Hexagon::L4_and_memopw_io:
2331  case Hexagon::L4_or_memopw_io:
2332  case Hexagon::L4_iadd_memoph_io:
2333  case Hexagon::L4_isub_memoph_io:
2334  case Hexagon::L4_add_memoph_io:
2335  case Hexagon::L4_sub_memoph_io:
2336  case Hexagon::L4_and_memoph_io:
2337  case Hexagon::L4_or_memoph_io:
2338  case Hexagon::L4_iadd_memopb_io:
2339  case Hexagon::L4_isub_memopb_io:
2340  case Hexagon::L4_add_memopb_io:
2341  case Hexagon::L4_sub_memopb_io:
2342  case Hexagon::L4_and_memopb_io:
2343  case Hexagon::L4_or_memopb_io:
2344  case Hexagon::L4_ior_memopb_io:
2345  case Hexagon::L4_ior_memoph_io:
2346  case Hexagon::L4_ior_memopw_io:
2347  case Hexagon::L4_iand_memopb_io:
2348  case Hexagon::L4_iand_memoph_io:
2349  case Hexagon::L4_iand_memopw_io:
2350  return true;
2351  }
// Unreachable: the switch above returns on every path.
2352  return false;
2353 }
2354 
// New-value flag queries. NOTE(review): the signature of the MachineInstr
// overload (orig. 2354-2355) and both TSFlags-test return lines (orig. 2357,
// 2362) were lost in extraction; surviving tokens are verbatim.
2356  const uint64_t F = MI.getDesc().TSFlags;
2358 }
2359 
2360 bool HexagonInstrInfo::isNewValue(unsigned Opcode) const {
2361  const uint64_t F = get(Opcode).TSFlags;
2363 }
2364 
// A new-value instruction is either a new-value jump or a new-value store.
2366  return isNewValueJump(MI) || isNewValueStore(MI);
2367 }
2368 
// A new-value jump is a branch carrying the new-value bit.
2370  return isNewValue(MI) && MI.isBranch();
2371 }
2372 
2373 bool HexagonInstrInfo::isNewValueJump(unsigned Opcode) const {
2374  return isNewValue(Opcode) && get(Opcode).isBranch() && isPredicated(Opcode);
2375 }
2376 
// TSFlags bit queries for new-value stores, operand extension, and
// predication. NOTE(review): several signature and return lines (orig. 2377,
// 2379, 2384, 2388, 2391, 2395, 2398, 2404, 2407, 2410, 2416, 2418, 2423,
// 2428) were lost in extraction; surviving tokens are verbatim.
2378  const uint64_t F = MI.getDesc().TSFlags;
2380 }
2381 
2382 bool HexagonInstrInfo::isNewValueStore(unsigned Opcode) const {
2383  const uint64_t F = get(Opcode).TSFlags;
2385 }
2386 
2387 // Returns true if a particular operand is extendable for an instruction.
2389  unsigned OperandNum) const {
2390  const uint64_t F = MI.getDesc().TSFlags;
2392  == OperandNum;
2393 }
2394 
// Predicated-new query (MachineInstr form); only valid on predicated insns.
2396  const uint64_t F = MI.getDesc().TSFlags;
2397  assert(isPredicated(MI));
2399 }
2400 
2401 bool HexagonInstrInfo::isPredicatedNew(unsigned Opcode) const {
2402  const uint64_t F = get(Opcode).TSFlags;
2403  assert(isPredicated(Opcode));
2405 }
2406 
// True-sense predication: the PredicatedFalse bit is clear.
2408  const uint64_t F = MI.getDesc().TSFlags;
2409  return !((F >> HexagonII::PredicatedFalsePos) &
2411 }
2412 
2413 bool HexagonInstrInfo::isPredicatedTrue(unsigned Opcode) const {
2414  const uint64_t F = get(Opcode).TSFlags;
2415  // Make sure that the instruction is predicated.
2417  return !((F >> HexagonII::PredicatedFalsePos) &
2419 }
2420 
2421 bool HexagonInstrInfo::isPredicated(unsigned Opcode) const {
2422  const uint64_t F = get(Opcode).TSFlags;
2424 }
2425 
2426 bool HexagonInstrInfo::isPredicateLate(unsigned Opcode) const {
2427  const uint64_t F = get(Opcode).TSFlags;
2429 }
2430 
2431 bool HexagonInstrInfo::isPredictedTaken(unsigned Opcode) const {
2432  const uint64_t F = get(Opcode).TSFlags;
2433  assert(get(Opcode).isBranch() &&
2434  (isPredicatedNew(Opcode) || isNewValue(Opcode)));
2435  return (F >> HexagonII::TakenPos) & HexagonII::TakenMask;
2436 }
2437 
// True for the four save-callee-saved-registers call pseudos.
// NOTE(review): the signature line (orig. 2438) was lost in extraction.
2439  return MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4 ||
2440  MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_EXT ||
2441  MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_PIC ||
2442  MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_EXT_PIC;
2443 }
2444 
// True for every load opcode whose result is sign-extended (signed byte and
// halfword loads in all addressing modes, plus predicated variants).
// NOTE(review): the signature line (orig. 2445) was lost in extraction.
2446  switch (MI.getOpcode()) {
2447  // Byte
2448  case Hexagon::L2_loadrb_io:
2449  case Hexagon::L4_loadrb_ur:
2450  case Hexagon::L4_loadrb_ap:
2451  case Hexagon::L2_loadrb_pr:
2452  case Hexagon::L2_loadrb_pbr:
2453  case Hexagon::L2_loadrb_pi:
2454  case Hexagon::L2_loadrb_pci:
2455  case Hexagon::L2_loadrb_pcr:
2456  case Hexagon::L2_loadbsw2_io:
2457  case Hexagon::L4_loadbsw2_ur:
2458  case Hexagon::L4_loadbsw2_ap:
2459  case Hexagon::L2_loadbsw2_pr:
2460  case Hexagon::L2_loadbsw2_pbr:
2461  case Hexagon::L2_loadbsw2_pi:
2462  case Hexagon::L2_loadbsw2_pci:
2463  case Hexagon::L2_loadbsw2_pcr:
2464  case Hexagon::L2_loadbsw4_io:
2465  case Hexagon::L4_loadbsw4_ur:
2466  case Hexagon::L4_loadbsw4_ap:
2467  case Hexagon::L2_loadbsw4_pr:
2468  case Hexagon::L2_loadbsw4_pbr:
2469  case Hexagon::L2_loadbsw4_pi:
2470  case Hexagon::L2_loadbsw4_pci:
2471  case Hexagon::L2_loadbsw4_pcr:
2472  case Hexagon::L4_loadrb_rr:
2473  case Hexagon::L2_ploadrbt_io:
2474  case Hexagon::L2_ploadrbt_pi:
2475  case Hexagon::L2_ploadrbf_io:
2476  case Hexagon::L2_ploadrbf_pi:
2477  case Hexagon::L2_ploadrbtnew_io:
2478  case Hexagon::L2_ploadrbfnew_io:
2479  case Hexagon::L4_ploadrbt_rr:
2480  case Hexagon::L4_ploadrbf_rr:
2481  case Hexagon::L4_ploadrbtnew_rr:
2482  case Hexagon::L4_ploadrbfnew_rr:
2483  case Hexagon::L2_ploadrbtnew_pi:
2484  case Hexagon::L2_ploadrbfnew_pi:
2485  case Hexagon::L4_ploadrbt_abs:
2486  case Hexagon::L4_ploadrbf_abs:
2487  case Hexagon::L4_ploadrbtnew_abs:
2488  case Hexagon::L4_ploadrbfnew_abs:
2489  case Hexagon::L2_loadrbgp:
2490  // Half
2491  case Hexagon::L2_loadrh_io:
2492  case Hexagon::L4_loadrh_ur:
2493  case Hexagon::L4_loadrh_ap:
2494  case Hexagon::L2_loadrh_pr:
2495  case Hexagon::L2_loadrh_pbr:
2496  case Hexagon::L2_loadrh_pi:
2497  case Hexagon::L2_loadrh_pci:
2498  case Hexagon::L2_loadrh_pcr:
2499  case Hexagon::L4_loadrh_rr:
2500  case Hexagon::L2_ploadrht_io:
2501  case Hexagon::L2_ploadrht_pi:
2502  case Hexagon::L2_ploadrhf_io:
2503  case Hexagon::L2_ploadrhf_pi:
2504  case Hexagon::L2_ploadrhtnew_io:
2505  case Hexagon::L2_ploadrhfnew_io:
2506  case Hexagon::L4_ploadrht_rr:
2507  case Hexagon::L4_ploadrhf_rr:
2508  case Hexagon::L4_ploadrhtnew_rr:
2509  case Hexagon::L4_ploadrhfnew_rr:
2510  case Hexagon::L2_ploadrhtnew_pi:
2511  case Hexagon::L2_ploadrhfnew_pi:
2512  case Hexagon::L4_ploadrht_abs:
2513  case Hexagon::L4_ploadrhf_abs:
2514  case Hexagon::L4_ploadrhtnew_abs:
2515  case Hexagon::L4_ploadrhfnew_abs:
2516  case Hexagon::L2_loadrhgp:
2517  return true;
2518  default:
2519  return false;
2520  }
2521 }
2522 
// Solo instructions must be the only instruction in their packet.
// NOTE(review): signature lines for the three functions below were lost in
// extraction; surviving tokens are verbatim.
2524  const uint64_t F = MI.getDesc().TSFlags;
2525  return (F >> HexagonII::SoloPos) & HexagonII::SoloMask;
2526 }
2527 
// Predicate-register spill/reload pseudos.
2529  switch (MI.getOpcode()) {
2530  case Hexagon::STriw_pred:
2531  case Hexagon::LDriw_pred:
2532  return true;
2533  default:
2534  return false;
2535  }
2536 }
2537 
// Heuristic tail-call check: a branch with a global or symbol operand.
2539  if (!MI.isBranch())
2540  return false;
2541 
2542  for (auto &Op : MI.operands())
2543  if (Op.isGlobal() || Op.isSymbol())
2544  return true;
2545  return false;
2546 }
2547 
2548 // Returns true when SU has a timing class TC1.
// Timing-class predicates delegate to the generated is_TCn helpers on the
// instruction's scheduling class. NOTE(review): signature lines for the
// functions below (and the returns at orig. 2590, 2398) were lost in
// extraction; surviving tokens are verbatim.
2550  unsigned SchedClass = MI.getDesc().getSchedClass();
2551  return is_TC1(SchedClass);
2552 }
2553 
2555  unsigned SchedClass = MI.getDesc().getSchedClass();
2556  return is_TC2(SchedClass);
2557 }
2558 
2560  unsigned SchedClass = MI.getDesc().getSchedClass();
2561  return is_TC2early(SchedClass);
2562 }
2563 
2565  unsigned SchedClass = MI.getDesc().getSchedClass();
2566  return is_TC4x(SchedClass);
2567 }
2568 
2569 // Schedule this ASAP.
// True when MI2 should be packed right after MI1: MI1 may become a .cur load
// feeding MI2, or MI1 feeds a potential new-value store MI2.
2571  const MachineInstr &MI2) const {
2572  if (mayBeCurLoad(MI1)) {
2573  // if (result of SU is used in Next) return true;
2574  unsigned DstReg = MI1.getOperand(0).getReg();
2575  int N = MI2.getNumOperands();
2576  for (int I = 0; I < N; I++)
2577  if (MI2.getOperand(I).isReg() && DstReg == MI2.getOperand(I).getReg())
2578  return true;
2579  }
2580  if (mayBeNewStore(MI2))
2581  if (MI2.getOpcode() == Hexagon::V6_vS32b_pi)
2582  if (MI1.getOperand(0).isReg() && MI2.getOperand(3).isReg() &&
2583  MI1.getOperand(0).getReg() == MI2.getOperand(3).getReg())
2584  return true;
2585  return false;
2586 }
2587 
// HVX-vector type-range check on the instruction's type field.
2589  const uint64_t V = getType(MI);
2591 }
2592 
2593 // Check if the Offset is a valid auto-inc imm by Load/Store Type.
// The offset must be a multiple of the access size; the element count must
// fit s4 for scalar types and s3 for HVX vector types.
// NOTE(review): the signature line (orig. 2594) was lost in extraction.
// NOTE(review): the llvm_unreachable message below has a grammar slip
// ("an valid") — candidate for a separate cleanup.
2595  int Size = VT.getSizeInBits() / 8;
2596  if (Offset % Size != 0)
2597  return false;
2598  int Count = Offset / Size;
2599 
2600  switch (VT.getSimpleVT().SimpleTy) {
2601  // For scalars the auto-inc is s4
2602  case MVT::i8:
2603  case MVT::i16:
2604  case MVT::i32:
2605  case MVT::i64:
2606  case MVT::f32:
2607  case MVT::f64:
2608  case MVT::v2i16:
2609  case MVT::v2i32:
2610  case MVT::v4i8:
2611  case MVT::v4i16:
2612  case MVT::v8i8:
2613  return isInt<4>(Count);
2614  // For HVX vectors the auto-inc is s3
2615  case MVT::v64i8:
2616  case MVT::v32i16:
2617  case MVT::v16i32:
2618  case MVT::v8i64:
2619  case MVT::v128i8:
2620  case MVT::v64i16:
2621  case MVT::v32i32:
2622  case MVT::v16i64:
2623  return isInt<3>(Count);
2624  default:
2625  break;
2626  }
2627 
2628  llvm_unreachable("Not an valid type!");
2629 }
2630 
// Range-checks Offset for the addressing encoding of Opcode. Alignment is
// assumed (see comment below); vector ops additionally require alignment to
// the HVX spill size. Unlisted opcodes hit the llvm_unreachable at the end.
2631 bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
2632  const TargetRegisterInfo *TRI, bool Extend) const {
2633  // This function is to check whether the "Offset" is in the correct range of
2634  // the given "Opcode". If "Offset" is not in the correct range, "A2_addi" is
2635  // inserted to calculate the final address. Due to this reason, the function
2636  // assumes that the "Offset" has correct alignment.
2637  // We used to assert if the offset was not properly aligned, however,
2638  // there are cases where a misaligned pointer recast can cause this
2639  // problem, and we need to allow for it. The front end warns of such
2640  // misaligns with respect to load size.
2641  switch (Opcode) {
2642  case Hexagon::PS_vstorerq_ai:
2643  case Hexagon::PS_vstorerw_ai:
2644  case Hexagon::PS_vstorerw_nt_ai:
2645  case Hexagon::PS_vloadrq_ai:
2646  case Hexagon::PS_vloadrw_ai:
2647  case Hexagon::PS_vloadrw_nt_ai:
2648  case Hexagon::V6_vL32b_ai:
2649  case Hexagon::V6_vS32b_ai:
2650  case Hexagon::V6_vL32b_nt_ai:
2651  case Hexagon::V6_vS32b_nt_ai:
2652  case Hexagon::V6_vL32Ub_ai:
2653  case Hexagon::V6_vS32Ub_ai: {
2654  unsigned VectorSize = TRI->getSpillSize(Hexagon::HvxVRRegClass);
2655  assert(isPowerOf2_32(VectorSize));
2656  if (Offset & (VectorSize-1))
2657  return false;
2658  return isInt<4>(Offset >> Log2_32(VectorSize));
2659  }
2660 
2661  case Hexagon::J2_loop0i:
2662  case Hexagon::J2_loop1i:
2663  return isUInt<10>(Offset);
2664 
2665  case Hexagon::S4_storeirb_io:
2666  case Hexagon::S4_storeirbt_io:
2667  case Hexagon::S4_storeirbf_io:
2668  return isUInt<6>(Offset);
2669 
2670  case Hexagon::S4_storeirh_io:
2671  case Hexagon::S4_storeirht_io:
2672  case Hexagon::S4_storeirhf_io:
2673  return isShiftedUInt<6,1>(Offset);
2674 
2675  case Hexagon::S4_storeiri_io:
2676  case Hexagon::S4_storeirit_io:
2677  case Hexagon::S4_storeirif_io:
2678  return isShiftedUInt<6,2>(Offset);
2679  }
2680 
// The opcodes above never take an extender; everything below can, in which
// case any offset is encodable.
2681  if (Extend)
2682  return true;
2683 
2684  switch (Opcode) {
2685  case Hexagon::L2_loadri_io:
2686  case Hexagon::S2_storeri_io:
2687  return (Offset >= Hexagon_MEMW_OFFSET_MIN) &&
2688  (Offset <= Hexagon_MEMW_OFFSET_MAX);
2689 
2690  case Hexagon::L2_loadrd_io:
2691  case Hexagon::S2_storerd_io:
2692  return (Offset >= Hexagon_MEMD_OFFSET_MIN) &&
2693  (Offset <= Hexagon_MEMD_OFFSET_MAX);
2694 
2695  case Hexagon::L2_loadrh_io:
2696  case Hexagon::L2_loadruh_io:
2697  case Hexagon::S2_storerh_io:
2698  case Hexagon::S2_storerf_io:
2699  return (Offset >= Hexagon_MEMH_OFFSET_MIN) &&
2700  (Offset <= Hexagon_MEMH_OFFSET_MAX);
2701 
2702  case Hexagon::L2_loadrb_io:
2703  case Hexagon::L2_loadrub_io:
2704  case Hexagon::S2_storerb_io:
2705  return (Offset >= Hexagon_MEMB_OFFSET_MIN) &&
2706  (Offset <= Hexagon_MEMB_OFFSET_MAX);
2707 
2708  case Hexagon::A2_addi:
2709  return (Offset >= Hexagon_ADDI_OFFSET_MIN) &&
2710  (Offset <= Hexagon_ADDI_OFFSET_MAX);
2711 
2712  case Hexagon::L4_iadd_memopw_io:
2713  case Hexagon::L4_isub_memopw_io:
2714  case Hexagon::L4_add_memopw_io:
2715  case Hexagon::L4_sub_memopw_io:
2716  case Hexagon::L4_and_memopw_io:
2717  case Hexagon::L4_or_memopw_io:
2718  return (0 <= Offset && Offset <= 255);
2719 
2720  case Hexagon::L4_iadd_memoph_io:
2721  case Hexagon::L4_isub_memoph_io:
2722  case Hexagon::L4_add_memoph_io:
2723  case Hexagon::L4_sub_memoph_io:
2724  case Hexagon::L4_and_memoph_io:
2725  case Hexagon::L4_or_memoph_io:
2726  return (0 <= Offset && Offset <= 127);
2727 
2728  case Hexagon::L4_iadd_memopb_io:
2729  case Hexagon::L4_isub_memopb_io:
2730  case Hexagon::L4_add_memopb_io:
2731  case Hexagon::L4_sub_memopb_io:
2732  case Hexagon::L4_and_memopb_io:
2733  case Hexagon::L4_or_memopb_io:
2734  return (0 <= Offset && Offset <= 63);
2735 
2736  // LDriw_xxx and STriw_xxx are pseudo operations, so it has to take offset of
2737  // any size. Later pass knows how to handle it.
2738  case Hexagon::STriw_pred:
2739  case Hexagon::LDriw_pred:
2740  case Hexagon::STriw_ctr:
2741  case Hexagon::LDriw_ctr:
2742  return true;
2743 
2744  case Hexagon::PS_fi:
2745  case Hexagon::PS_fia:
2746  case Hexagon::INLINEASM:
2747  return true;
2748 
2749  case Hexagon::L2_ploadrbt_io:
2750  case Hexagon::L2_ploadrbf_io:
2751  case Hexagon::L2_ploadrubt_io:
2752  case Hexagon::L2_ploadrubf_io:
2753  case Hexagon::S2_pstorerbt_io:
2754  case Hexagon::S2_pstorerbf_io:
2755  return isUInt<6>(Offset);
2756 
2757  case Hexagon::L2_ploadrht_io:
2758  case Hexagon::L2_ploadrhf_io:
2759  case Hexagon::L2_ploadruht_io:
2760  case Hexagon::L2_ploadruhf_io:
2761  case Hexagon::S2_pstorerht_io:
2762  case Hexagon::S2_pstorerhf_io:
2763  return isShiftedUInt<6,1>(Offset);
2764 
2765  case Hexagon::L2_ploadrit_io:
2766  case Hexagon::L2_ploadrif_io:
2767  case Hexagon::S2_pstorerit_io:
2768  case Hexagon::S2_pstorerif_io:
2769  return isShiftedUInt<6,2>(Offset);
2770 
2771  case Hexagon::L2_ploadrdt_io:
2772  case Hexagon::L2_ploadrdf_io:
2773  case Hexagon::S2_pstorerdt_io:
2774  case Hexagon::S2_pstorerdf_io:
2775  return isShiftedUInt<6,3>(Offset);
2776  } // switch
2777 
2778  llvm_unreachable("No offset range is defined for this opcode. "
2779  "Please define it in the above switch statement!");
2780 }
2781 
// Vector-accumulator predicate: HVX vector op that also accumulates.
// NOTE(review): signature lines for the three functions below (and the
// second type comparison at orig. 2791) were lost in extraction.
2783  return isHVXVec(MI) && isAccumulator(MI);
2784 }
2785 
// Vector-ALU predicate, keyed off the instruction's type field.
2787  const uint64_t F = get(MI.getOpcode()).TSFlags;
2788  const uint64_t V = ((F >> HexagonII::TypePos) & HexagonII::TypeMask);
2789  return
2790  V == HexagonII::TypeCVI_VA ||
2792 }
2793 
// Whether ConsMI can consume ProdMI's result in the very next packet.
2795  const MachineInstr &ConsMI) const {
2796  if (EnableACCForwarding && isVecAcc(ProdMI) && isVecAcc(ConsMI))
2797  return true;
2798 
2799  if (EnableALUForwarding && (isVecALU(ConsMI) || isLateSourceInstr(ConsMI)))
2800  return true;
2801 
2802  if (mayBeNewStore(ConsMI))
2803  return true;
2804 
2805  return false;
2806 }
2807 
// True for every load opcode whose result is zero-extended (unsigned byte and
// halfword loads in all addressing modes, plus predicated variants).
// NOTE(review): the signature line (orig. 2808) was lost in extraction.
2809  switch (MI.getOpcode()) {
2810  // Byte
2811  case Hexagon::L2_loadrub_io:
2812  case Hexagon::L4_loadrub_ur:
2813  case Hexagon::L4_loadrub_ap:
2814  case Hexagon::L2_loadrub_pr:
2815  case Hexagon::L2_loadrub_pbr:
2816  case Hexagon::L2_loadrub_pi:
2817  case Hexagon::L2_loadrub_pci:
2818  case Hexagon::L2_loadrub_pcr:
2819  case Hexagon::L2_loadbzw2_io:
2820  case Hexagon::L4_loadbzw2_ur:
2821  case Hexagon::L4_loadbzw2_ap:
2822  case Hexagon::L2_loadbzw2_pr:
2823  case Hexagon::L2_loadbzw2_pbr:
2824  case Hexagon::L2_loadbzw2_pi:
2825  case Hexagon::L2_loadbzw2_pci:
2826  case Hexagon::L2_loadbzw2_pcr:
2827  case Hexagon::L2_loadbzw4_io:
2828  case Hexagon::L4_loadbzw4_ur:
2829  case Hexagon::L4_loadbzw4_ap:
2830  case Hexagon::L2_loadbzw4_pr:
2831  case Hexagon::L2_loadbzw4_pbr:
2832  case Hexagon::L2_loadbzw4_pi:
2833  case Hexagon::L2_loadbzw4_pci:
2834  case Hexagon::L2_loadbzw4_pcr:
2835  case Hexagon::L4_loadrub_rr:
2836  case Hexagon::L2_ploadrubt_io:
2837  case Hexagon::L2_ploadrubt_pi:
2838  case Hexagon::L2_ploadrubf_io:
2839  case Hexagon::L2_ploadrubf_pi:
2840  case Hexagon::L2_ploadrubtnew_io:
2841  case Hexagon::L2_ploadrubfnew_io:
2842  case Hexagon::L4_ploadrubt_rr:
2843  case Hexagon::L4_ploadrubf_rr:
2844  case Hexagon::L4_ploadrubtnew_rr:
2845  case Hexagon::L4_ploadrubfnew_rr:
2846  case Hexagon::L2_ploadrubtnew_pi:
2847  case Hexagon::L2_ploadrubfnew_pi:
2848  case Hexagon::L4_ploadrubt_abs:
2849  case Hexagon::L4_ploadrubf_abs:
2850  case Hexagon::L4_ploadrubtnew_abs:
2851  case Hexagon::L4_ploadrubfnew_abs:
2852  case Hexagon::L2_loadrubgp:
2853  // Half
2854  case Hexagon::L2_loadruh_io:
2855  case Hexagon::L4_loadruh_ur:
2856  case Hexagon::L4_loadruh_ap:
2857  case Hexagon::L2_loadruh_pr:
2858  case Hexagon::L2_loadruh_pbr:
2859  case Hexagon::L2_loadruh_pi:
2860  case Hexagon::L2_loadruh_pci:
2861  case Hexagon::L2_loadruh_pcr:
2862  case Hexagon::L4_loadruh_rr:
2863  case Hexagon::L2_ploadruht_io:
2864  case Hexagon::L2_ploadruht_pi:
2865  case Hexagon::L2_ploadruhf_io:
2866  case Hexagon::L2_ploadruhf_pi:
2867  case Hexagon::L2_ploadruhtnew_io:
2868  case Hexagon::L2_ploadruhfnew_io:
2869  case Hexagon::L4_ploadruht_rr:
2870  case Hexagon::L4_ploadruhf_rr:
2871  case Hexagon::L4_ploadruhtnew_rr:
2872  case Hexagon::L4_ploadruhfnew_rr:
2873  case Hexagon::L2_ploadruhtnew_pi:
2874  case Hexagon::L2_ploadruhfnew_pi:
2875  case Hexagon::L4_ploadruht_abs:
2876  case Hexagon::L4_ploadruhf_abs:
2877  case Hexagon::L4_ploadruhtnew_abs:
2878  case Hexagon::L4_ploadruhfnew_abs:
2879  case Hexagon::L2_loadruhgp:
2880  return true;
2881  default:
2882  return false;
2883  }
2884 }
2885 
2886 // Add latency to instruction.
// Extra latency is needed only between HVX vector instructions whose result
// cannot be forwarded into the next packet.
// NOTE(review): the signature line (orig. 2887) was lost in extraction.
2888  const MachineInstr &MI2) const {
2889  if (isHVXVec(MI1) && isHVXVec(MI2))
2890  if (!isVecUsableNextPacket(MI1, MI2))
2891  return true;
2892  return false;
2893 }
2894 
2895 /// Get the base register and byte offset of a load/store instr.
// NOTE(review): the signature line (orig. 2896) was lost in extraction.
2897  MachineInstr &LdSt, MachineOperand *&BaseOp, int64_t &Offset,
2898  const TargetRegisterInfo *TRI) const {
2899  unsigned AccessSize = 0;
2900  BaseOp = getBaseAndOffset(LdSt, Offset, AccessSize);
2901  assert((!BaseOp || BaseOp->isReg()) &&
2902  "getMemOperandWithOffset only supports base "
2903  "operands of type register.");
2904  return BaseOp != nullptr;
2905 }
2906 
2907 /// Can these instructions execute at the same time in a bundle.
// Two cases: a store through R29 may dual-issue with allocframe, and a store
// may pair with the instruction that defines its stored value (new-value
// store), unless NV scheduling is disabled.
// NOTE(review): the signature line (orig. 2908) was lost in extraction.
2909  const MachineInstr &Second) const {
2910  if (Second.mayStore() && First.getOpcode() == Hexagon::S2_allocframe) {
2911  const MachineOperand &Op = Second.getOperand(0);
2912  if (Op.isReg() && Op.isUse() && Op.getReg() == Hexagon::R29)
2913  return true;
2914  }
2915  if (DisableNVSchedule)
2916  return false;
2917  if (mayBeNewStore(Second)) {
2918  // Make sure the definition of the first instruction is the value being
2919  // stored.
2920  const MachineOperand &Stored =
2921  Second.getOperand(Second.getNumOperands() - 1);
2922  if (!Stored.isReg())
2923  return false;
2924  for (unsigned i = 0, e = First.getNumOperands(); i < e; ++i) {
2925  const MachineOperand &Op = First.getOperand(i);
2926  if (Op.isReg() && Op.isDef() && Op.getReg() == Stored.getReg())
2927  return true;
2928  }
2929  }
2930  return false;
2931 }
2932 
// True for the no-return call pseudos.
2934  unsigned Opc = CallMI.getOpcode();
2935  return Opc == Hexagon::PS_call_nr || Opc == Hexagon::PS_callr_nr;
2936 }
2937 
// True when the basic block contains an exception-handling label.
2939  for (auto &I : *B)
2940  if (I.isEHLabel())
2941  return true;
2942  return false;
2943 }
2944 
2945 // Returns true if an instruction can be converted into a non-extended
2946 // equivalent instruction.
2948  short NonExtOpcode;
2949  // Check if the instruction has a register form that uses register in place
2950  // of the extended operand, if so return that as the non-extended form.
2951  if (Hexagon::getRegForm(MI.getOpcode()) >= 0)
2952  return true;
2953 
2954  if (MI.getDesc().mayLoad() || MI.getDesc().mayStore()) {
2955  // Check addressing mode and retrieve non-ext equivalent instruction.
2956 
2957  switch (getAddrMode(MI)) {
2958  case HexagonII::Absolute:
2959  // Load/store with absolute addressing mode can be converted into
2960  // base+offset mode.
2961  NonExtOpcode = Hexagon::changeAddrMode_abs_io(MI.getOpcode());
2962  break;
2964  // Load/store with base+offset addressing mode can be converted into
2965  // base+register offset addressing mode. However left shift operand should
2966  // be set to 0.
2967  NonExtOpcode = Hexagon::changeAddrMode_io_rr(MI.getOpcode());
2968  break;
2970  NonExtOpcode = Hexagon::changeAddrMode_ur_rr(MI.getOpcode());
2971  break;
2972  default:
2973  return false;
2974  }
2975  if (NonExtOpcode < 0)
2976  return false;
2977  return true;
2978  }
2979  return false;
2980 }
2981 
2983  return Hexagon::getRealHWInstr(MI.getOpcode(),
2984  Hexagon::InstrType_Pseudo) >= 0;
2985 }
2986 
2988  const {
2990  while (I != E) {
2991  if (I->isBarrier())
2992  return true;
2993  ++I;
2994  }
2995  return false;
2996 }
2997 
2998 // Returns true, if a LD insn can be promoted to a cur load.
3000  const uint64_t F = MI.getDesc().TSFlags;
3002  Subtarget.hasV60Ops();
3003 }
3004 
3005 // Returns true, if a ST insn can be promoted to a new-value store.
3007  if (MI.mayStore() && !Subtarget.useNewValueStores())
3008  return false;
3009 
3010  const uint64_t F = MI.getDesc().TSFlags;
3012 }
3013 
3015  const MachineInstr &ConsMI) const {
3016  // There is no stall when ProdMI is not a V60 vector.
3017  if (!isHVXVec(ProdMI))
3018  return false;
3019 
3020  // There is no stall when ProdMI and ConsMI are not dependent.
3021  if (!isDependent(ProdMI, ConsMI))
3022  return false;
3023 
3024  // When Forward Scheduling is enabled, there is no stall if ProdMI and ConsMI
3025  // are scheduled in consecutive packets.
3026  if (isVecUsableNextPacket(ProdMI, ConsMI))
3027  return false;
3028 
3029  return true;
3030 }
3031 
3034  // There is no stall when I is not a V60 vector.
3035  if (!isHVXVec(MI))
3036  return false;
3037 
3039  MachineBasicBlock::const_instr_iterator MIE = MII->getParent()->instr_end();
3040 
3041  if (!MII->isBundle())
3042  return producesStall(*MII, MI);
3043 
3044  for (++MII; MII != MIE && MII->isInsideBundle(); ++MII) {
3045  const MachineInstr &J = *MII;
3046  if (producesStall(J, MI))
3047  return true;
3048  }
3049  return false;
3050 }
3051 
3053  unsigned PredReg) const {
3054  for (const MachineOperand &MO : MI.operands()) {
3055  // Predicate register must be explicitly defined.
3056  if (MO.isRegMask() && MO.clobbersPhysReg(PredReg))
3057  return false;
3058  if (MO.isReg() && MO.isDef() && MO.isImplicit() && (MO.getReg() == PredReg))
3059  return false;
3060  }
3061 
3062  // Instruction that produce late predicate cannot be used as sources of
3063  // dot-new.
3064  switch (MI.getOpcode()) {
3065  case Hexagon::A4_addp_c:
3066  case Hexagon::A4_subp_c:
3067  case Hexagon::A4_tlbmatch:
3068  case Hexagon::A5_ACS:
3069  case Hexagon::F2_sfinvsqrta:
3070  case Hexagon::F2_sfrecipa:
3071  case Hexagon::J2_endloop0:
3072  case Hexagon::J2_endloop01:
3073  case Hexagon::J2_ploop1si:
3074  case Hexagon::J2_ploop1sr:
3075  case Hexagon::J2_ploop2si:
3076  case Hexagon::J2_ploop2sr:
3077  case Hexagon::J2_ploop3si:
3078  case Hexagon::J2_ploop3sr:
3079  case Hexagon::S2_cabacdecbin:
3080  case Hexagon::S2_storew_locked:
3081  case Hexagon::S4_stored_locked:
3082  return false;
3083  }
3084  return true;
3085 }
3086 
3087 bool HexagonInstrInfo::PredOpcodeHasJMP_c(unsigned Opcode) const {
3088  return Opcode == Hexagon::J2_jumpt ||
3089  Opcode == Hexagon::J2_jumptpt ||
3090  Opcode == Hexagon::J2_jumpf ||
3091  Opcode == Hexagon::J2_jumpfpt ||
3092  Opcode == Hexagon::J2_jumptnew ||
3093  Opcode == Hexagon::J2_jumpfnew ||
3094  Opcode == Hexagon::J2_jumptnewpt ||
3095  Opcode == Hexagon::J2_jumpfnewpt;
3096 }
3097 
3099  if (Cond.empty() || !isPredicated(Cond[0].getImm()))
3100  return false;
3101  return !isPredicatedTrue(Cond[0].getImm());
3102 }
3103 
3105  const uint64_t F = MI.getDesc().TSFlags;
3107 }
3108 
3109 // Returns the base register in a memory access (load/store). The offset is
3110 // returned in Offset and the access size is returned in AccessSize.
3111 // If the base operand has a subregister or the offset field does not contain
3112 // an immediate value, return nullptr.
3114  int64_t &Offset,
3115  unsigned &AccessSize) const {
3116  // Return if it is not a base+offset type instruction or a MemOp.
3119  !isMemOp(MI) && !isPostIncrement(MI))
3120  return nullptr;
3121 
3122  AccessSize = getMemAccessSize(MI);
3123 
3124  unsigned BasePos = 0, OffsetPos = 0;
3125  if (!getBaseAndOffsetPosition(MI, BasePos, OffsetPos))
3126  return nullptr;
3127 
3128  // Post increment updates its EA after the mem access,
3129  // so we need to treat its offset as zero.
3130  if (isPostIncrement(MI)) {
3131  Offset = 0;
3132  } else {
3133  const MachineOperand &OffsetOp = MI.getOperand(OffsetPos);
3134  if (!OffsetOp.isImm())
3135  return nullptr;
3136  Offset = OffsetOp.getImm();
3137  }
3138 
3139  const MachineOperand &BaseOp = MI.getOperand(BasePos);
3140  if (BaseOp.getSubReg() != 0)
3141  return nullptr;
3142  return &const_cast<MachineOperand&>(BaseOp);
3143 }
3144 
3145 /// Return the position of the base and offset operands for this instruction.
3147  unsigned &BasePos, unsigned &OffsetPos) const {
3148  if (!isAddrModeWithOffset(MI) && !isPostIncrement(MI))
3149  return false;
3150 
3151  // Deal with memops first.
3152  if (isMemOp(MI)) {
3153  BasePos = 0;
3154  OffsetPos = 1;
3155  } else if (MI.mayStore()) {
3156  BasePos = 0;
3157  OffsetPos = 1;
3158  } else if (MI.mayLoad()) {
3159  BasePos = 1;
3160  OffsetPos = 2;
3161  } else
3162  return false;
3163 
3164  if (isPredicated(MI)) {
3165  BasePos++;
3166  OffsetPos++;
3167  }
3168  if (isPostIncrement(MI)) {
3169  BasePos++;
3170  OffsetPos++;
3171  }
3172 
3173  if (!MI.getOperand(BasePos).isReg() || !MI.getOperand(OffsetPos).isImm())
3174  return false;
3175 
3176  return true;
3177 }
3178 
3179 // Inserts branching instructions in reverse order of their occurrence.
3180 // e.g. jump_t t1 (i1)
3181 // jump t2 (i2)
3182 // Jumpers = {i2, i1}
3184  MachineBasicBlock& MBB) const {
3186  // If the block has no terminators, it just falls into the block after it.
3188  if (I == MBB.instr_begin())
3189  return Jumpers;
3190 
3191  // A basic block may looks like this:
3192  //
3193  // [ insn
3194  // EH_LABEL
3195  // insn
3196  // insn
3197  // insn
3198  // EH_LABEL
3199  // insn ]
3200  //
3201  // It has two succs but does not have a terminator
3202  // Don't know how to handle it.
3203  do {
3204  --I;
3205  if (I->isEHLabel())
3206  return Jumpers;
3207  } while (I != MBB.instr_begin());
3208 
3209  I = MBB.instr_end();
3210  --I;
3211 
3212  while (I->isDebugInstr()) {
3213  if (I == MBB.instr_begin())
3214  return Jumpers;
3215  --I;
3216  }
3217  if (!isUnpredicatedTerminator(*I))
3218  return Jumpers;
3219 
3220  // Get the last instruction in the block.
3221  MachineInstr *LastInst = &*I;
3222  Jumpers.push_back(LastInst);
3223  MachineInstr *SecondLastInst = nullptr;
3224  // Find one more terminator if present.
3225  do {
3226  if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(*I)) {
3227  if (!SecondLastInst) {
3228  SecondLastInst = &*I;
3229  Jumpers.push_back(SecondLastInst);
3230  } else // This is a third branch.
3231  return Jumpers;
3232  }
3233  if (I == MBB.instr_begin())
3234  break;
3235  --I;
3236  } while (true);
3237  return Jumpers;
3238 }
3239 
3240 // Returns Operand Index for the constant extended instruction.
3242  const uint64_t F = MI.getDesc().TSFlags;
3244 }
3245 
3246 // See if instruction could potentially be a duplex candidate.
3247 // If so, return its group. Zero otherwise.
3249  const MachineInstr &MI) const {
3250  unsigned DstReg, SrcReg, Src1Reg, Src2Reg;
3251 
3252  switch (MI.getOpcode()) {
3253  default:
3254  return HexagonII::HCG_None;
3255  //
3256  // Compound pairs.
3257  // "p0=cmp.eq(Rs16,Rt16); if (p0.new) jump:nt #r9:2"
3258  // "Rd16=#U6 ; jump #r9:2"
3259  // "Rd16=Rs16 ; jump #r9:2"
3260  //
3261  case Hexagon::C2_cmpeq:
3262  case Hexagon::C2_cmpgt:
3263  case Hexagon::C2_cmpgtu:
3264  DstReg = MI.getOperand(0).getReg();
3265  Src1Reg = MI.getOperand(1).getReg();
3266  Src2Reg = MI.getOperand(2).getReg();
3267  if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3268  (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3269  isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg))
3270  return HexagonII::HCG_A;
3271  break;
3272  case Hexagon::C2_cmpeqi:
3273  case Hexagon::C2_cmpgti:
3274  case Hexagon::C2_cmpgtui:
3275  // P0 = cmp.eq(Rs,#u2)
3276  DstReg = MI.getOperand(0).getReg();
3277  SrcReg = MI.getOperand(1).getReg();
3278  if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3279  (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3280  isIntRegForSubInst(SrcReg) && MI.getOperand(2).isImm() &&
3281  ((isUInt<5>(MI.getOperand(2).getImm())) ||
3282  (MI.getOperand(2).getImm() == -1)))
3283  return HexagonII::HCG_A;
3284  break;
3285  case Hexagon::A2_tfr:
3286  // Rd = Rs
3287  DstReg = MI.getOperand(0).getReg();
3288  SrcReg = MI.getOperand(1).getReg();
3289  if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
3290  return HexagonII::HCG_A;
3291  break;
3292  case Hexagon::A2_tfrsi:
3293  // Rd = #u6
3294  // Do not test for #u6 size since the const is getting extended
3295  // regardless and compound could be formed.
3296  DstReg = MI.getOperand(0).getReg();
3297  if (isIntRegForSubInst(DstReg))
3298  return HexagonII::HCG_A;
3299  break;
3300  case Hexagon::S2_tstbit_i:
3301  DstReg = MI.getOperand(0).getReg();
3302  Src1Reg = MI.getOperand(1).getReg();
3303  if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3304  (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3305  MI.getOperand(2).isImm() &&
3306  isIntRegForSubInst(Src1Reg) && (MI.getOperand(2).getImm() == 0))
3307  return HexagonII::HCG_A;
3308  break;
3309  // The fact that .new form is used pretty much guarantees
3310  // that predicate register will match. Nevertheless,
3311  // there could be some false positives without additional
3312  // checking.
3313  case Hexagon::J2_jumptnew:
3314  case Hexagon::J2_jumpfnew:
3315  case Hexagon::J2_jumptnewpt:
3316  case Hexagon::J2_jumpfnewpt:
3317  Src1Reg = MI.getOperand(0).getReg();
3318  if (Hexagon::PredRegsRegClass.contains(Src1Reg) &&
3319  (Hexagon::P0 == Src1Reg || Hexagon::P1 == Src1Reg))
3320  return HexagonII::HCG_B;
3321  break;
3322  // Transfer and jump:
3323  // Rd=#U6 ; jump #r9:2
3324  // Rd=Rs ; jump #r9:2
3325  // Do not test for jump range here.
3326  case Hexagon::J2_jump:
3327  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
3328  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
3329  return HexagonII::HCG_C;
3330  }
3331 
3332  return HexagonII::HCG_None;
3333 }
3334 
3335 // Returns -1 when there is no opcode found.
3337  const MachineInstr &GB) const {
3340  if ((GA.getOpcode() != Hexagon::C2_cmpeqi) ||
3341  (GB.getOpcode() != Hexagon::J2_jumptnew))
3342  return -1u;
3343  unsigned DestReg = GA.getOperand(0).getReg();
3344  if (!GB.readsRegister(DestReg))
3345  return -1u;
3346  if (DestReg != Hexagon::P0 && DestReg != Hexagon::P1)
3347  return -1u;
3348  // The value compared against must be either u5 or -1.
3349  const MachineOperand &CmpOp = GA.getOperand(2);
3350  if (!CmpOp.isImm())
3351  return -1u;
3352  int V = CmpOp.getImm();
3353  if (V == -1)
3354  return DestReg == Hexagon::P0 ? Hexagon::J4_cmpeqn1_tp0_jump_nt
3355  : Hexagon::J4_cmpeqn1_tp1_jump_nt;
3356  if (!isUInt<5>(V))
3357  return -1u;
3358  return DestReg == Hexagon::P0 ? Hexagon::J4_cmpeqi_tp0_jump_nt
3359  : Hexagon::J4_cmpeqi_tp1_jump_nt;
3360 }
3361 
3362 int HexagonInstrInfo::getCondOpcode(int Opc, bool invertPredicate) const {
3363  enum Hexagon::PredSense inPredSense;
3364  inPredSense = invertPredicate ? Hexagon::PredSense_false :
3365  Hexagon::PredSense_true;
3366  int CondOpcode = Hexagon::getPredOpcode(Opc, inPredSense);
3367  if (CondOpcode >= 0) // Valid Conditional opcode/instruction
3368  return CondOpcode;
3369 
3370  llvm_unreachable("Unexpected predicable instruction");
3371 }
3372 
3373 // Return the cur value instruction for a given store.
3375  switch (MI.getOpcode()) {
3376  default: llvm_unreachable("Unknown .cur type");
3377  case Hexagon::V6_vL32b_pi:
3378  return Hexagon::V6_vL32b_cur_pi;
3379  case Hexagon::V6_vL32b_ai:
3380  return Hexagon::V6_vL32b_cur_ai;
3381  case Hexagon::V6_vL32b_nt_pi:
3382  return Hexagon::V6_vL32b_nt_cur_pi;
3383  case Hexagon::V6_vL32b_nt_ai:
3384  return Hexagon::V6_vL32b_nt_cur_ai;
3385  }
3386  return 0;
3387 }
3388 
3389 // Return the regular version of the .cur instruction.
3391  switch (MI.getOpcode()) {
3392  default: llvm_unreachable("Unknown .cur type");
3393  case Hexagon::V6_vL32b_cur_pi:
3394  return Hexagon::V6_vL32b_pi;
3395  case Hexagon::V6_vL32b_cur_ai:
3396  return Hexagon::V6_vL32b_ai;
3397  case Hexagon::V6_vL32b_nt_cur_pi:
3398  return Hexagon::V6_vL32b_nt_pi;
3399  case Hexagon::V6_vL32b_nt_cur_ai:
3400  return Hexagon::V6_vL32b_nt_ai;
3401  }
3402  return 0;
3403 }
3404 
3405 // The diagram below shows the steps involved in the conversion of a predicated
3406 // store instruction to its .new predicated new-value form.
3407 //
3408 // Note: It doesn't include conditional new-value stores as they can't be
3409 // converted to .new predicate.
3410 //
3411 // p.new NV store [ if(p0.new)memw(R0+#0)=R2.new ]
3412 // ^ ^
3413 // / \ (not OK. it will cause new-value store to be
3414 // / X conditional on p0.new while R2 producer is
3415 // / \ on p0)
3416 // / \.
3417 // p.new store p.old NV store
3418 // [if(p0.new)memw(R0+#0)=R2] [if(p0)memw(R0+#0)=R2.new]
3419 // ^ ^
3420 // \ /
3421 // \ /
3422 // \ /
3423 // p.old store
3424 // [if (p0)memw(R0+#0)=R2]
3425 //
3426 // The following set of instructions further explains the scenario where
3427 // conditional new-value store becomes invalid when promoted to .new predicate
3428 // form.
3429 //
3430 // { 1) if (p0) r0 = add(r1, r2)
3431 // 2) p0 = cmp.eq(r3, #0) }
3432 //
3433 // 3) if (p0) memb(r1+#0) = r0 --> this instruction can't be grouped with
3434 // the first two instructions because in instr 1, r0 is conditional on old value
3435 // of p0 but its use in instr 3 is conditional on p0 modified by instr 2 which
3436 // is not valid for new-value stores.
3437 // Predicated new value stores (i.e. if (p0) memw(..)=r0.new) are excluded
3438 // from the "Conditional Store" list. Because a predicated new value store
3439 // would NOT be promoted to a double dot new store. See diagram below:
3440 // This function returns yes for those stores that are predicated but not
3441 // yet promoted to predicate dot new instructions.
3442 //
3443 // +---------------------+
3444 // /-----| if (p0) memw(..)=r0 |---------\~
3445 // || +---------------------+ ||
3446 // promote || /\ /\ || promote
3447 // || /||\ /||\ ||
3448 // \||/ demote || \||/
3449 // \/ || || \/
3450 // +-------------------------+ || +-------------------------+
3451 // | if (p0.new) memw(..)=r0 | || | if (p0) memw(..)=r0.new |
3452 // +-------------------------+ || +-------------------------+
3453 // || || ||
3454 // || demote \||/
3455 // promote || \/ NOT possible
3456 // || || /\~
3457 // \||/ || /||\~
3458 // \/ || ||
3459 // +-----------------------------+
3460 // | if (p0.new) memw(..)=r0.new |
3461 // +-----------------------------+
3462 // Double Dot New Store
3463 //
3464 // Returns the most basic instruction for the .new predicated instructions and
3465 // new-value stores.
3466 // For example, all of the following instructions will be converted back to the
3467 // same instruction:
3468 // 1) if (p0.new) memw(R0+#0) = R1.new --->
3469 // 2) if (p0) memw(R0+#0)= R1.new -------> if (p0) memw(R0+#0) = R1
3470 // 3) if (p0.new) memw(R0+#0) = R1 --->
3471 //
3472 // To understand the translation of instruction 1 to its original form, consider
3473 // a packet with 3 instructions.
3474 // { p0 = cmp.eq(R0,R1)
3475 // if (p0.new) R2 = add(R3, R4)
3476 // R5 = add (R3, R1)
3477 // }
3478 // if (p0) memw(R5+#0) = R2 <--- trying to include it in the previous packet
3479 //
3480 // This instruction can be part of the previous packet only if both p0 and R2
3481 // are promoted to .new values. This promotion happens in steps, first
3482 // predicate register is promoted to .new and in the next iteration R2 is
3483 // promoted. Therefore, in case of dependence check failure (due to R5) during
3484 // next iteration, it should be converted back to its most basic form.
3485 
3486 // Return the new value instruction for a given store.
3488  int NVOpcode = Hexagon::getNewValueOpcode(MI.getOpcode());
3489  if (NVOpcode >= 0) // Valid new-value store instruction.
3490  return NVOpcode;
3491 
3492  switch (MI.getOpcode()) {
3493  default:
3494  report_fatal_error(std::string("Unknown .new type: ") +
3495  std::to_string(MI.getOpcode()));
3496  case Hexagon::S4_storerb_ur:
3497  return Hexagon::S4_storerbnew_ur;
3498 
3499  case Hexagon::S2_storerb_pci:
3500  return Hexagon::S2_storerb_pci;
3501 
3502  case Hexagon::S2_storeri_pci:
3503  return Hexagon::S2_storeri_pci;
3504 
3505  case Hexagon::S2_storerh_pci:
3506  return Hexagon::S2_storerh_pci;
3507 
3508  case Hexagon::S2_storerd_pci:
3509  return Hexagon::S2_storerd_pci;
3510 
3511  case Hexagon::S2_storerf_pci:
3512  return Hexagon::S2_storerf_pci;
3513 
3514  case Hexagon::V6_vS32b_ai:
3515  return Hexagon::V6_vS32b_new_ai;
3516 
3517  case Hexagon::V6_vS32b_pi:
3518  return Hexagon::V6_vS32b_new_pi;
3519  }
3520  return 0;
3521 }
3522 
3523 // Returns the opcode to use when converting MI, which is a conditional jump,
3524 // into a conditional instruction which uses the .new value of the predicate.
3525 // We also use branch probabilities to add a hint to the jump.
3526 // If MBPI is null, all edges will be treated as equally likely for the
3527 // purposes of establishing a predication hint.
3529  const MachineBranchProbabilityInfo *MBPI) const {
3530  // We assume that block can have at most two successors.
3531  const MachineBasicBlock *Src = MI.getParent();
3532  const MachineOperand &BrTarget = MI.getOperand(1);
3533  bool Taken = false;
3534  const BranchProbability OneHalf(1, 2);
3535 
3536  auto getEdgeProbability = [MBPI] (const MachineBasicBlock *Src,
3537  const MachineBasicBlock *Dst) {
3538  if (MBPI)
3539  return MBPI->getEdgeProbability(Src, Dst);
3540  return BranchProbability(1, Src->succ_size());
3541  };
3542 
3543  if (BrTarget.isMBB()) {
3544  const MachineBasicBlock *Dst = BrTarget.getMBB();
3545  Taken = getEdgeProbability(Src, Dst) >= OneHalf;
3546  } else {
3547  // The branch target is not a basic block (most likely a function).
3548  // Since BPI only gives probabilities for targets that are basic blocks,
3549  // try to identify another target of this branch (potentially a fall-
3550  // -through) and check the probability of that target.
3551  //
3552  // The only handled branch combinations are:
3553  // - one conditional branch,
3554  // - one conditional branch followed by one unconditional branch.
3555  // Otherwise, assume not-taken.
3557  const MachineBasicBlock &B = *MI.getParent();
3558  bool SawCond = false, Bad = false;
3559  for (const MachineInstr &I : B) {
3560  if (!I.isBranch())
3561  continue;
3562  if (I.isConditionalBranch()) {
3563  SawCond = true;
3564  if (&I != &MI) {
3565  Bad = true;
3566  break;
3567  }
3568  }
3569  if (I.isUnconditionalBranch() && !SawCond) {
3570  Bad = true;
3571  break;
3572  }
3573  }
3574  if (!Bad) {
3576  MachineBasicBlock::const_instr_iterator NextIt = std::next(It);
3577  if (NextIt == B.instr_end()) {
3578  // If this branch is the last, look for the fall-through block.
3579  for (const MachineBasicBlock *SB : B.successors()) {
3580  if (!B.isLayoutSuccessor(SB))
3581  continue;
3582  Taken = getEdgeProbability(Src, SB) < OneHalf;
3583  break;
3584  }
3585  } else {
3586  assert(NextIt->isUnconditionalBranch());
3587  // Find the first MBB operand and assume it's the target.
3588  const MachineBasicBlock *BT = nullptr;
3589  for (const MachineOperand &Op : NextIt->operands()) {
3590  if (!Op.isMBB())
3591  continue;
3592  BT = Op.getMBB();
3593  break;
3594  }
3595  Taken = BT && getEdgeProbability(Src, BT) < OneHalf;
3596  }
3597  } // if (!Bad)
3598  }
3599 
3600  // The Taken flag should be set to something reasonable by this point.
3601 
3602  switch (MI.getOpcode()) {
3603  case Hexagon::J2_jumpt:
3604  return Taken ? Hexagon::J2_jumptnewpt : Hexagon::J2_jumptnew;
3605  case Hexagon::J2_jumpf:
3606  return Taken ? Hexagon::J2_jumpfnewpt : Hexagon::J2_jumpfnew;
3607 
3608  default:
3609  llvm_unreachable("Unexpected jump instruction.");
3610  }
3611 }
3612 
3613 // Return .new predicate version for an instruction.
3615  const MachineBranchProbabilityInfo *MBPI) const {
3616  switch (MI.getOpcode()) {
3617  // Condtional Jumps
3618  case Hexagon::J2_jumpt:
3619  case Hexagon::J2_jumpf:
3620  return getDotNewPredJumpOp(MI, MBPI);
3621  }
3622 
3623  int NewOpcode = Hexagon::getPredNewOpcode(MI.getOpcode());
3624  if (NewOpcode >= 0)
3625  return NewOpcode;
3626  return 0;
3627 }
3628 
3630  int NewOp = MI.getOpcode();
3631  if (isPredicated(NewOp) && isPredicatedNew(NewOp)) { // Get predicate old form
3632  NewOp = Hexagon::getPredOldOpcode(NewOp);
3633  // All Hexagon architectures have prediction bits on dot-new branches,
3634  // but only Hexagon V60+ has prediction bits on dot-old ones. Make sure
3635  // to pick the right opcode when converting back to dot-old.
3636  if (!Subtarget.getFeatureBits()[Hexagon::ArchV60]) {
3637  switch (NewOp) {
3638  case Hexagon::J2_jumptpt:
3639  NewOp = Hexagon::J2_jumpt;
3640  break;
3641  case Hexagon::J2_jumpfpt:
3642  NewOp = Hexagon::J2_jumpf;
3643  break;
3644  case Hexagon::J2_jumprtpt:
3645  NewOp = Hexagon::J2_jumprt;
3646  break;
3647  case Hexagon::J2_jumprfpt:
3648  NewOp = Hexagon::J2_jumprf;
3649  break;
3650  }
3651  }
3652  assert(NewOp >= 0 &&
3653  "Couldn't change predicate new instruction to its old form.");
3654  }
3655 
3656  if (isNewValueStore(NewOp)) { // Convert into non-new-value format
3657  NewOp = Hexagon::getNonNVStore(NewOp);
3658  assert(NewOp >= 0 && "Couldn't change new-value store to its old form.");
3659  }
3660 
3661  if (Subtarget.hasV60Ops())
3662  return NewOp;
3663 
3664  // Subtargets prior to V60 didn't support 'taken' forms of predicated jumps.
3665  switch (NewOp) {
3666  case Hexagon::J2_jumpfpt:
3667  return Hexagon::J2_jumpf;
3668  case Hexagon::J2_jumptpt:
3669  return Hexagon::J2_jumpt;
3670  case Hexagon::J2_jumprfpt:
3671  return Hexagon::J2_jumprf;
3672  case Hexagon::J2_jumprtpt:
3673  return Hexagon::J2_jumprt;
3674  }
3675  return NewOp;
3676 }
3677 
3678 // See if instruction could potentially be a duplex candidate.
3679 // If so, return its group. Zero otherwise.
3681  const MachineInstr &MI) const {
3682  unsigned DstReg, SrcReg, Src1Reg, Src2Reg;
3683  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
3684 
3685  switch (MI.getOpcode()) {
3686  default:
3687  return HexagonII::HSIG_None;
3688  //
3689  // Group L1:
3690  //
3691  // Rd = memw(Rs+#u4:2)
3692  // Rd = memub(Rs+#u4:0)
3693  case Hexagon::L2_loadri_io:
3694  DstReg = MI.getOperand(0).getReg();
3695  SrcReg = MI.getOperand(1).getReg();
3696  // Special case this one from Group L2.
3697  // Rd = memw(r29+#u5:2)
3698  if (isIntRegForSubInst(DstReg)) {
3699  if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
3700  HRI.getStackRegister() == SrcReg &&
3701  MI.getOperand(2).isImm() &&
3702  isShiftedUInt<5,2>(MI.getOperand(2).getImm()))
3703  return HexagonII::HSIG_L2;
3704  // Rd = memw(Rs+#u4:2)
3705  if (isIntRegForSubInst(SrcReg) &&
3706  (MI.getOperand(2).isImm() &&
3707  isShiftedUInt<4,2>(MI.getOperand(2).getImm())))
3708  return HexagonII::HSIG_L1;
3709  }
3710  break;
3711  case Hexagon::L2_loadrub_io:
3712  // Rd = memub(Rs+#u4:0)
3713  DstReg = MI.getOperand(0).getReg();
3714  SrcReg = MI.getOperand(1).getReg();
3715  if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3716  MI.getOperand(2).isImm() && isUInt<4>(MI.getOperand(2).getImm()))
3717  return HexagonII::HSIG_L1;
3718  break;
3719  //
3720  // Group L2:
3721  //
3722  // Rd = memh/memuh(Rs+#u3:1)
3723  // Rd = memb(Rs+#u3:0)
3724  // Rd = memw(r29+#u5:2) - Handled above.
3725  // Rdd = memd(r29+#u5:3)
3726  // deallocframe
3727  // [if ([!]p0[.new])] dealloc_return
3728  // [if ([!]p0[.new])] jumpr r31
3729  case Hexagon::L2_loadrh_io:
3730  case Hexagon::L2_loadruh_io:
3731  // Rd = memh/memuh(Rs+#u3:1)
3732  DstReg = MI.getOperand(0).getReg();
3733  SrcReg = MI.getOperand(1).getReg();
3734  if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3735  MI.getOperand(2).isImm() &&
3736  isShiftedUInt<3,1>(MI.getOperand(2).getImm()))
3737  return HexagonII::HSIG_L2;
3738  break;
3739  case Hexagon::L2_loadrb_io:
3740  // Rd = memb(Rs+#u3:0)
3741  DstReg = MI.getOperand(0).getReg();
3742  SrcReg = MI.getOperand(1).getReg();
3743  if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3744  MI.getOperand(2).isImm() &&
3745  isUInt<3>(MI.getOperand(2).getImm()))
3746  return HexagonII::HSIG_L2;
3747  break;
3748  case Hexagon::L2_loadrd_io:
3749  // Rdd = memd(r29+#u5:3)
3750  DstReg = MI.getOperand(0).getReg();
3751  SrcReg = MI.getOperand(1).getReg();
3752  if (isDblRegForSubInst(DstReg, HRI) &&
3753  Hexagon::IntRegsRegClass.contains(SrcReg) &&
3754  HRI.getStackRegister() == SrcReg &&
3755  MI.getOperand(2).isImm() &&
3756  isShiftedUInt<5,3>(MI.getOperand(2).getImm()))
3757  return HexagonII::HSIG_L2;
3758  break;
3759  // dealloc_return is not documented in Hexagon Manual, but marked
3760  // with A_SUBINSN attribute in iset_v4classic.py.
3761  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
3762  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
3763  case Hexagon::L4_return:
3764  case Hexagon::L2_deallocframe:
3765  return HexagonII::HSIG_L2;
3766  case Hexagon::EH_RETURN_JMPR:
3767  case Hexagon::PS_jmpret:
3768  case Hexagon::SL2_jumpr31:
3769  // jumpr r31
3770  // Actual form JMPR implicit-def %pc, implicit %r31, implicit internal %r0
3771  DstReg = MI.getOperand(0).getReg();
3772  if (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg))
3773  return HexagonII::HSIG_L2;
3774  break;
3775  case Hexagon::PS_jmprett:
3776  case Hexagon::PS_jmpretf:
3777  case Hexagon::PS_jmprettnewpt:
3778  case Hexagon::PS_jmpretfnewpt:
3779  case Hexagon::PS_jmprettnew:
3780  case Hexagon::PS_jmpretfnew:
3781  case Hexagon::SL2_jumpr31_t:
3782  case Hexagon::SL2_jumpr31_f:
3783  case Hexagon::SL2_jumpr31_tnew:
3784  DstReg = MI.getOperand(1).getReg();
3785  SrcReg = MI.getOperand(0).getReg();
3786  // [if ([!]p0[.new])] jumpr r31
3787  if ((Hexagon::PredRegsRegClass.contains(SrcReg) &&
3788  (Hexagon::P0 == SrcReg)) &&
3789  (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg)))
3790  return HexagonII::HSIG_L2;
3791  break;
3792  case Hexagon::L4_return_t:
3793  case Hexagon::L4_return_f:
3794  case Hexagon::L4_return_tnew_pnt:
3795  case Hexagon::L4_return_fnew_pnt:
3796  case Hexagon::L4_return_tnew_pt:
3797  case Hexagon::L4_return_fnew_pt:
3798  // [if ([!]p0[.new])] dealloc_return
3799  SrcReg = MI.getOperand(0).getReg();
3800  if (Hexagon::PredRegsRegClass.contains(SrcReg) && (Hexagon::P0 == SrcReg))
3801  return HexagonII::HSIG_L2;
3802  break;
3803  //
3804  // Group S1:
3805  //
3806  // memw(Rs+#u4:2) = Rt
3807  // memb(Rs+#u4:0) = Rt
3808  case Hexagon::S2_storeri_io:
3809  // Special case this one from Group S2.
3810  // memw(r29+#u5:2) = Rt
3811  Src1Reg = MI.getOperand(0).getReg();
3812  Src2Reg = MI.getOperand(2).getReg();
3813  if (Hexagon::IntRegsRegClass.contains(Src1Reg) &&
3814  isIntRegForSubInst(Src2Reg) &&
3815  HRI.getStackRegister() == Src1Reg && MI.getOperand(1).isImm() &&
3816  isShiftedUInt<5,2>(MI.getOperand(1).getImm()))
3817  return HexagonII::HSIG_S2;
3818  // memw(Rs+#u4:2) = Rt
3819  if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
3820  MI.getOperand(1).isImm() &&
3821  isShiftedUInt<4,2>(MI.getOperand(1).getImm()))
3822  return HexagonII::HSIG_S1;
3823  break;
3824  case Hexagon::S2_storerb_io:
3825  // memb(Rs+#u4:0) = Rt
3826  Src1Reg = MI.getOperand(0).getReg();
3827  Src2Reg = MI.getOperand(2).getReg();
3828  if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
3829  MI.getOperand(1).isImm() && isUInt<4>(MI.getOperand(1).getImm()))
3830  return HexagonII::HSIG_S1;
3831  break;
3832  //
3833  // Group S2:
3834  //
3835  // memh(Rs+#u3:1) = Rt
3836  // memw(r29+#u5:2) = Rt
3837  // memd(r29+#s6:3) = Rtt
3838  // memw(Rs+#u4:2) = #U1
3839  // memb(Rs+#u4) = #U1
3840  // allocframe(#u5:3)
3841  case Hexagon::S2_storerh_io:
3842  // memh(Rs+#u3:1) = Rt
3843  Src1Reg = MI.getOperand(0).getReg();
3844  Src2Reg = MI.getOperand(2).getReg();
3845  if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
3846  MI.getOperand(1).isImm() &&
3847  isShiftedUInt<3,1>(MI.getOperand(1).getImm()))
3848  return HexagonII::HSIG_S1;
3849  break;
3850  case Hexagon::S2_storerd_io:
3851  // memd(r29+#s6:3) = Rtt
3852  Src1Reg = MI.getOperand(0).getReg();
3853  Src2Reg = MI.getOperand(2).getReg();
3854  if (isDblRegForSubInst(Src2Reg, HRI) &&
3855  Hexagon::IntRegsRegClass.contains(Src1Reg) &&
3856  HRI.getStackRegister() == Src1Reg && MI.getOperand(1).isImm() &&
3857  isShiftedInt<6,3>(MI.getOperand(1).getImm()))
3858  return HexagonII::HSIG_S2;
3859  break;
3860  case Hexagon::S4_storeiri_io:
3861  // memw(Rs+#u4:2) = #U1
3862  Src1Reg = MI.getOperand(0).getReg();
3863  if (isIntRegForSubInst(Src1Reg) && MI.getOperand(1).isImm() &&
3864  isShiftedUInt<4,2>(MI.getOperand(1).getImm()) &&
3865  MI.getOperand(2).isImm() && isUInt<1>(MI.getOperand(2).getImm()))
3866  return HexagonII::HSIG_S2;
3867  break;
3868  case Hexagon::S4_storeirb_io:
3869  // memb(Rs+#u4) = #U1
3870  Src1Reg = MI.getOperand(0).getReg();
3871  if (isIntRegForSubInst(Src1Reg) &&
3872  MI.getOperand(1).isImm() && isUInt<4>(MI.getOperand(1).getImm()) &&
3873  MI.getOperand(2).isImm() && isUInt<1>(MI.getOperand(2).getImm()))
3874  return HexagonII::HSIG_S2;
3875  break;
3876  case Hexagon::S2_allocframe:
3877  if (MI.getOperand(2).isImm() &&
3878  isShiftedUInt<5,3>(MI.getOperand(2).getImm()))
3879  return HexagonII::HSIG_S1;
3880  break;
3881  //
3882  // Group A:
3883  //
3884  // Rx = add(Rx,#s7)
3885  // Rd = Rs
3886  // Rd = #u6
3887  // Rd = #-1
3888  // if ([!]P0[.new]) Rd = #0
3889  // Rd = add(r29,#u6:2)
3890  // Rx = add(Rx,Rs)
3891  // P0 = cmp.eq(Rs,#u2)
3892  // Rdd = combine(#0,Rs)
3893  // Rdd = combine(Rs,#0)
3894  // Rdd = combine(#u2,#U2)
3895  // Rd = add(Rs,#1)
3896  // Rd = add(Rs,#-1)
3897  // Rd = sxth/sxtb/zxtb/zxth(Rs)
3898  // Rd = and(Rs,#1)
3899  case Hexagon::A2_addi:
3900  DstReg = MI.getOperand(0).getReg();
3901  SrcReg = MI.getOperand(1).getReg();
3902  if (isIntRegForSubInst(DstReg)) {
3903  // Rd = add(r29,#u6:2)
3904  if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
3905  HRI.getStackRegister() == SrcReg && MI.getOperand(2).isImm() &&
3906  isShiftedUInt<6,2>(MI.getOperand(2).getImm()))
3907  return HexagonII::HSIG_A;
3908  // Rx = add(Rx,#s7)
3909  if ((DstReg == SrcReg) && MI.getOperand(2).isImm() &&
3910  isInt<7>(MI.getOperand(2).getImm()))
3911  return HexagonII::HSIG_A;
3912  // Rd = add(Rs,#1)
3913  // Rd = add(Rs,#-1)
3914  if (isIntRegForSubInst(SrcReg) && MI.getOperand(2).isImm() &&
3915  ((MI.getOperand(2).getImm() == 1) ||
3916  (MI.getOperand(2).getImm() == -1)))
3917  return HexagonII::HSIG_A;
3918  }
3919  break;
3920  case Hexagon::A2_add:
3921  // Rx = add(Rx,Rs)
3922  DstReg = MI.getOperand(0).getReg();
3923  Src1Reg = MI.getOperand(1).getReg();
3924  Src2Reg = MI.getOperand(2).getReg();
3925  if (isIntRegForSubInst(DstReg) && (DstReg == Src1Reg) &&
3926  isIntRegForSubInst(Src2Reg))
3927  return HexagonII::HSIG_A;
3928  break;
3929  case Hexagon::A2_andir:
3930  // Same as zxtb.
3931  // Rd16=and(Rs16,#255)
3932  // Rd16=and(Rs16,#1)
3933  DstReg = MI.getOperand(0).getReg();
3934  SrcReg = MI.getOperand(1).getReg();
3935  if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3936  MI.getOperand(2).isImm() &&
3937  ((MI.getOperand(2).getImm() == 1) ||
3938  (MI.getOperand(2).getImm() == 255)))
3939  return HexagonII::HSIG_A;
3940  break;
3941  case Hexagon::A2_tfr:
3942  // Rd = Rs
3943  DstReg = MI.getOperand(0).getReg();
3944  SrcReg = MI.getOperand(1).getReg();
3945  if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
3946  return HexagonII::HSIG_A;
3947  break;
3948  case Hexagon::A2_tfrsi:
3949  // Rd = #u6
3950  // Do not test for #u6 size since the const is getting extended
3951  // regardless and compound could be formed.
3952  // Rd = #-1
3953  DstReg = MI.getOperand(0).getReg();
3954  if (isIntRegForSubInst(DstReg))
3955  return HexagonII::HSIG_A;
3956  break;
3957  case Hexagon::C2_cmoveit:
3958  case Hexagon::C2_cmovenewit:
3959  case Hexagon::C2_cmoveif:
3960  case Hexagon::C2_cmovenewif:
3961  // if ([!]P0[.new]) Rd = #0
3962  // Actual form:
3963  // %r16 = C2_cmovenewit internal %p0, 0, implicit undef %r16;
3964  DstReg = MI.getOperand(0).getReg();
3965  SrcReg = MI.getOperand(1).getReg();
3966  if (isIntRegForSubInst(DstReg) &&
3967  Hexagon::PredRegsRegClass.contains(SrcReg) && Hexagon::P0 == SrcReg &&
3968  MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0)
3969  return HexagonII::HSIG_A;
3970  break;
3971  case Hexagon::C2_cmpeqi:
3972  // P0 = cmp.eq(Rs,#u2)
3973  DstReg = MI.getOperand(0).getReg();
3974  SrcReg = MI.getOperand(1).getReg();
3975  if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3976  Hexagon::P0 == DstReg && isIntRegForSubInst(SrcReg) &&
3977  MI.getOperand(2).isImm() && isUInt<2>(MI.getOperand(2).getImm()))
3978  return HexagonII::HSIG_A;
3979  break;
3980  case Hexagon::A2_combineii:
3981  case Hexagon::A4_combineii:
3982  // Rdd = combine(#u2,#U2)
3983  DstReg = MI.getOperand(0).getReg();
3984  if (isDblRegForSubInst(DstReg, HRI) &&
3985  ((MI.getOperand(1).isImm() && isUInt<2>(MI.getOperand(1).getImm())) ||
3986  (MI.getOperand(1).isGlobal() &&
3987  isUInt<2>(MI.getOperand(1).getOffset()))) &&
3988  ((MI.getOperand(2).isImm() && isUInt<2>(MI.getOperand(2).getImm())) ||
3989  (MI.getOperand(2).isGlobal() &&
3990  isUInt<2>(MI.getOperand(2).getOffset()))))
3991  return HexagonII::HSIG_A;
3992  break;
3993  case Hexagon::A4_combineri:
3994  // Rdd = combine(Rs,#0)
3995  DstReg = MI.getOperand(0).getReg();
3996  SrcReg = MI.getOperand(1).getReg();
3997  if (isDblRegForSubInst(DstReg, HRI) && isIntRegForSubInst(SrcReg) &&
3998  ((MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) ||
3999  (MI.getOperand(2).isGlobal() && MI.getOperand(2).getOffset() == 0)))
4000  return HexagonII::HSIG_A;
4001  break;
4002  case Hexagon::A4_combineir:
4003  // Rdd = combine(#0,Rs)
4004  DstReg = MI.getOperand(0).getReg();
4005  SrcReg = MI.getOperand(2).getReg();
4006  if (isDblRegForSubInst(DstReg, HRI) && isIntRegForSubInst(SrcReg) &&
4007  ((MI.getOperand(1).isImm() && MI.getOperand(1).getImm() == 0) ||
4008  (MI.getOperand(1).isGlobal() && MI.getOperand(1).getOffset() == 0)))
4009  return HexagonII::HSIG_A;
4010  break;
4011  case Hexagon::A2_sxtb:
4012  case Hexagon::A2_sxth:
4013  case Hexagon::A2_zxtb:
4014  case Hexagon::A2_zxth:
4015  // Rd = sxth/sxtb/zxtb/zxth(Rs)
4016  DstReg = MI.getOperand(0).getReg();
4017  SrcReg = MI.getOperand(1).getReg();
4018  if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
4019  return HexagonII::HSIG_A;
4020  break;
4021  }
4022 
4023  return HexagonII::HSIG_None;
4024 }
4025 
4027  return Hexagon::getRealHWInstr(MI.getOpcode(), Hexagon::InstrType_Real);
4028 }
4029 
4031  const InstrItineraryData *ItinData, const MachineInstr &MI) const {
4032  // Default to one cycle for no itinerary. However, an "empty" itinerary may
4033  // still have a MinLatency property, which getStageLatency checks.
4034  if (!ItinData)
4035  return getInstrLatency(ItinData, MI);
4036 
4037  if (MI.isTransient())
4038  return 0;
4039  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
4040 }
4041 
4042 /// getOperandLatency - Compute and return the use operand latency of a given
4043 /// pair of def and use.
4044 /// In most cases, the static scheduling itinerary was enough to determine the
4045 /// operand latency. But it may not be possible for instructions with variable
4046 /// number of defs / uses.
4047 ///
4048 /// This is a raw interface to the itinerary that may be directly overriden by
4049 /// a target. Use computeOperandLatency to get the best estimate of latency.
4051  const MachineInstr &DefMI,
4052  unsigned DefIdx,
4053  const MachineInstr &UseMI,
4054  unsigned UseIdx) const {
4055  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
4056 
4057  // Get DefIdx and UseIdx for super registers.
4058  const MachineOperand &DefMO = DefMI.getOperand(DefIdx);
4059 
4060  if (DefMO.isReg() && HRI.isPhysicalRegister(DefMO.getReg())) {
4061  if (DefMO.isImplicit()) {
4062  for (MCSuperRegIterator SR(DefMO.getReg(), &HRI); SR.isValid(); ++SR) {
4063  int Idx = DefMI.findRegisterDefOperandIdx(*SR, false, false, &HRI);
4064  if (Idx != -1) {
4065  DefIdx = Idx;
4066  break;
4067  }
4068  }
4069  }
4070 
4071  const MachineOperand &UseMO = UseMI.getOperand(UseIdx);
4072  if (UseMO.isImplicit()) {
4073  for (MCSuperRegIterator SR(UseMO.getReg(), &HRI); SR.isValid(); ++SR) {
4074  int Idx = UseMI.findRegisterUseOperandIdx(*SR, false, &HRI);
4075  if (Idx != -1) {
4076  UseIdx = Idx;
4077  break;
4078  }
4079  }
4080  }
4081  }
4082 
4083  int Latency = TargetInstrInfo::getOperandLatency(ItinData, DefMI, DefIdx,
4084  UseMI, UseIdx);
4085  if (!Latency)
4086  // We should never have 0 cycle latency between two instructions unless
4087  // they can be packetized together. However, this decision can't be made
4088  // here.
4089  Latency = 1;
4090  return Latency;
4091 }
4092 
4093 // inverts the predication logic.
4094 // p -> NotP
4095 // NotP -> P
4097  SmallVectorImpl<MachineOperand> &Cond) const {
4098  if (Cond.empty())
4099  return false;
4100  unsigned Opc = getInvertedPredicatedOpcode(Cond[0].getImm());
4101  Cond[0].setImm(Opc);
4102  return true;
4103 }
4104 
4105 unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const {
4106  int InvPredOpcode;
4107  InvPredOpcode = isPredicatedTrue(Opc) ? Hexagon::getFalsePredOpcode(Opc)
4108  : Hexagon::getTruePredOpcode(Opc);
4109  if (InvPredOpcode >= 0) // Valid instruction with the inverted predicate.
4110  return InvPredOpcode;
4111 
4112  llvm_unreachable("Unexpected predicated instruction");
4113 }
4114 
4115 // Returns the max value that doesn't need to be extended.
4117  const uint64_t F = MI.getDesc().TSFlags;
4118  unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
4120  unsigned bits = (F >> HexagonII::ExtentBitsPos)
4122 
4123  if (isSigned) // if value is signed
4124  return ~(-1U << (bits - 1));
4125  else
4126  return ~(-1U << bits);
4127 }
4128 
4129 
4131  switch (MI.getOpcode()) {
4132  case Hexagon::L2_loadrbgp:
4133  case Hexagon::L2_loadrdgp:
4134  case Hexagon::L2_loadrhgp:
4135  case Hexagon::L2_loadrigp:
4136  case Hexagon::L2_loadrubgp:
4137  case Hexagon::L2_loadruhgp:
4138  case Hexagon::S2_storerbgp:
4139  case Hexagon::S2_storerbnewgp:
4140  case Hexagon::S2_storerhgp:
4141  case Hexagon::S2_storerhnewgp:
4142  case Hexagon::S2_storerigp:
4143  case Hexagon::S2_storerinewgp:
4144  case Hexagon::S2_storerdgp:
4145  case Hexagon::S2_storerfgp:
4146  return true;
4147  }
4148  const uint64_t F = MI.getDesc().TSFlags;
4149  unsigned addrMode =
4151  // Disallow any base+offset instruction. The assembler does not yet reorder
4152  // based up any zero offset instruction.
4153  return (addrMode == HexagonII::BaseRegOffset ||
4154  addrMode == HexagonII::BaseImmOffset ||
4155  addrMode == HexagonII::BaseLongOffset);
4156 }
4157 
4159  using namespace HexagonII;
4160 
4161  const uint64_t F = MI.getDesc().TSFlags;
4162  unsigned S = (F >> MemAccessSizePos) & MemAccesSizeMask;
4164  if (Size != 0)
4165  return Size;
4166 
4167  // Handle vector access sizes.
4168  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
4169  switch (S) {
4171  return HRI.getSpillSize(Hexagon::HvxVRRegClass);
4172  default:
4173  llvm_unreachable("Unexpected instruction");
4174  }
4175 }
4176 
4177 // Returns the min value that doesn't need to be extended.
4179  const uint64_t F = MI.getDesc().TSFlags;
4180  unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
4182  unsigned bits = (F >> HexagonII::ExtentBitsPos)
4184 
4185  if (isSigned) // if value is signed
4186  return -1U << (bits - 1);
4187  else
4188  return 0;
4189 }
4190 
4191 // Returns opcode of the non-extended equivalent instruction.
4193  // Check if the instruction has a register form that uses register in place
4194  // of the extended operand, if so return that as the non-extended form.
4195  short NonExtOpcode = Hexagon::getRegForm(MI.getOpcode());
4196  if (NonExtOpcode >= 0)
4197  return NonExtOpcode;
4198 
4199  if (MI.getDesc().mayLoad() || MI.getDesc().mayStore()) {
4200  // Check addressing mode and retrieve non-ext equivalent instruction.
4201  switch (getAddrMode(MI)) {
4202  case HexagonII::Absolute:
4203  return Hexagon::changeAddrMode_abs_io(MI.getOpcode());
4205  return Hexagon::changeAddrMode_io_rr(MI.getOpcode());
4207  return Hexagon::changeAddrMode_ur_rr(MI.getOpcode());
4208 
4209  default:
4210  return -1;
4211  }
4212  }
4213  return -1;
4214 }
4215 
4217  unsigned &PredReg, unsigned &PredRegPos, unsigned &PredRegFlags) const {
4218  if (Cond.empty())
4219  return false;
4220  assert(Cond.size() == 2);
4221  if (isNewValueJump(Cond[0].getImm()) || Cond[1].isMBB()) {
4222  LLVM_DEBUG(dbgs() << "No predregs for new-value jumps/endloop");
4223  return false;
4224  }
4225  PredReg = Cond[1].getReg();
4226  PredRegPos = 1;
4227  // See IfConversion.cpp why we add RegState::Implicit | RegState::Undef
4228  PredRegFlags = 0;
4229  if (Cond[1].isImplicit())
4230  PredRegFlags = RegState::Implicit;
4231  if (Cond[1].isUndef())
4232  PredRegFlags |= RegState::Undef;
4233  return true;
4234 }
4235 
4237  return Hexagon::getRealHWInstr(MI.getOpcode(), Hexagon::InstrType_Pseudo);
4238 }
4239 
4241  return Hexagon::getRegForm(MI.getOpcode());
4242 }
4243 
4244 // Return the number of bytes required to encode the instruction.
4245 // Hexagon instructions are fixed length, 4 bytes, unless they
4246 // use a constant extender, which requires another 4 bytes.
4247 // For debug instructions and prolog labels, return 0.
4249  if (MI.isDebugInstr() || MI.isPosition())
4250  return 0;
4251 
4252  unsigned Size = MI.getDesc().getSize();
4253  if (!Size)
4254  // Assume the default insn size in case it cannot be determined
4255  // for whatever reason.
4256  Size = HEXAGON_INSTR_SIZE;
4257 
4258  if (isConstExtended(MI) || isExtended(MI))
4259  Size += HEXAGON_INSTR_SIZE;
4260 
4261  // Try and compute number of instructions in asm.
4263  const MachineBasicBlock &MBB = *MI.getParent();
4264  const MachineFunction *MF = MBB.getParent();
4265  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
4266 
4267  // Count the number of register definitions to find the asm string.
4268  unsigned NumDefs = 0;
4269  for (; MI.getOperand(NumDefs).isReg() && MI.getOperand(NumDefs).isDef();
4270  ++NumDefs)
4271  assert(NumDefs != MI.getNumOperands()-2 && "No asm string?");
4272 
4273  assert(MI.getOperand(NumDefs).isSymbol() && "No asm string?");
4274  // Disassemble the AsmStr and approximate number of instructions.
4275  const char *AsmStr = MI.getOperand(NumDefs).getSymbolName();
4276  Size = getInlineAsmLength(AsmStr, *MAI);
4277  }
4278 
4279  return Size;
4280 }
4281 
4283  const uint64_t F = MI.getDesc().TSFlags;
4284  return (F >> HexagonII::TypePos) & HexagonII::TypeMask;
4285 }
4286 
4288  const InstrItineraryData &II = *Subtarget.getInstrItineraryData();
4289  const InstrStage &IS = *II.beginStage(MI.getDesc().getSchedClass());
4290 
4291  return IS.getUnits();
4292 }
4293 
4294 // Calculate size of the basic block without debug instructions.
4296  return nonDbgMICount(BB->instr_begin(), BB->instr_end());
4297 }
4298 
4300  MachineBasicBlock::const_iterator BundleHead) const {
4301  assert(BundleHead->isBundle() && "Not a bundle header");
4302  auto MII = BundleHead.getInstrIterator();
4303  // Skip the bundle header.
4304  return nonDbgMICount(++MII, getBundleEnd(BundleHead.getInstrIterator()));
4305 }
4306 
4307 /// immediateExtend - Changes the instruction in place to one using an immediate
4308 /// extender.
4310  assert((isExtendable(MI)||isConstExtended(MI)) &&
4311  "Instruction must be extendable");
4312  // Find which operand is extendable.
4313  short ExtOpNum = getCExtOpNum(MI);
4314  MachineOperand &MO = MI.getOperand(ExtOpNum);
4315  // This needs to be something we understand.
4316  assert((MO.isMBB() || MO.isImm()) &&
4317  "Branch with unknown extendable field type");
4318  // Mark given operand as extended.
4320 }
4321 
4323  MachineInstr &MI, MachineBasicBlock *NewTarget) const {
4324  LLVM_DEBUG(dbgs() << "\n[invertAndChangeJumpTarget] to "
4325  << printMBBReference(*NewTarget);
4326  MI.dump(););
4327  assert(MI.isBranch());
4328  unsigned NewOpcode = getInvertedPredicatedOpcode(MI.getOpcode());
4329  int TargetPos = MI.getNumOperands() - 1;
4330  // In general branch target is the last operand,
4331  // but some implicit defs added at the end might change it.
4332  while ((TargetPos > -1) && !MI.getOperand(TargetPos).isMBB())
4333  --TargetPos;
4334  assert((TargetPos >= 0) && MI.getOperand(TargetPos).isMBB());
4335  MI.getOperand(TargetPos).setMBB(NewTarget);
4337  NewOpcode = reversePrediction(NewOpcode);
4338  }
4339  MI.setDesc(get(NewOpcode));
4340  return true;
4341 }
4342 
4344  /* +++ The code below is used to generate complete set of Hexagon Insn +++ */
4346  MachineBasicBlock &B = *A;
4348  DebugLoc DL = I->getDebugLoc();
4349  MachineInstr *NewMI;
4350 
4351  for (unsigned insn = TargetOpcode::GENERIC_OP_END+1;
4352  insn < Hexagon::INSTRUCTION_LIST_END; ++insn) {
4353  NewMI = BuildMI(B, I, DL, get(insn));
4354  LLVM_DEBUG(dbgs() << "\n"
4355  << getName(NewMI->getOpcode())
4356  << " Class: " << NewMI->getDesc().getSchedClass());
4357  NewMI->eraseFromParent();
4358  }
4359  /* --- The code above is used to generate complete set of Hexagon Insn --- */
4360 }
4361 
4362 // inverts the predication logic.
4363 // p -> NotP
4364 // NotP -> P
4366  LLVM_DEBUG(dbgs() << "\nTrying to reverse pred. sense of:"; MI.dump());
4368  return true;
4369 }
4370 
4371 // Reverse the branch prediction.
4372 unsigned HexagonInstrInfo::reversePrediction(unsigned Opcode) const {
4373  int PredRevOpcode = -1;
4374  if (isPredictedTaken(Opcode))
4375  PredRevOpcode = Hexagon::notTakenBranchPrediction(Opcode);
4376  else
4377  PredRevOpcode = Hexagon::takenBranchPrediction(Opcode);
4378  assert(PredRevOpcode > 0);
4379  return PredRevOpcode;
4380 }
4381 
4382 // TODO: Add more rigorous validation.
4384  const {
4385  return Cond.empty() || (Cond[0].isImm() && (Cond.size() != 1));
4386 }
4387 
4388 void HexagonInstrInfo::
4390  assert(MIB->isBundle());
4391  MachineOperand &Operand = MIB->getOperand(0);
4392  if (Operand.isImm())
4393  Operand.setImm(Operand.getImm() | memShufDisabledMask);
4394  else
4395  MIB->addOperand(MachineOperand::CreateImm(memShufDisabledMask));
4396 }
4397 
4399  assert(MIB.isBundle());
4400  const MachineOperand &Operand = MIB.getOperand(0);
4401  return (Operand.isImm() && (Operand.getImm() & memShufDisabledMask) != 0);
4402 }
4403 
4404 // Addressing mode relations.
4406  return Opc >= 0 ? Hexagon::changeAddrMode_abs_io(Opc) : Opc;
4407 }
4408 
4410  return Opc >= 0 ? Hexagon::changeAddrMode_io_abs(Opc) : Opc;
4411 }
4412 
4414  return Opc >= 0 ? Hexagon::changeAddrMode_io_pi(Opc) : Opc;
4415 }
4416 
4418  return Opc >= 0 ? Hexagon::changeAddrMode_io_rr(Opc) : Opc;
4419 }
4420 
4422  return Opc >= 0 ? Hexagon::changeAddrMode_pi_io(Opc) : Opc;
4423 }
4424 
4426  return Opc >= 0 ? Hexagon::changeAddrMode_rr_io(Opc) : Opc;
4427 }
4428 
4430  return Opc >= 0 ? Hexagon::changeAddrMode_rr_ur(Opc) : Opc;
4431 }
4432 
4434  return Opc >= 0 ? Hexagon::changeAddrMode_ur_rr(Opc) : Opc;
4435 }
static bool isReg(const MCInst &MI, unsigned OpNo)
unsigned getTargetFlags() const
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
short getNonExtOpcode(const MachineInstr &MI) const
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
Return an array that contains the direct target flag values and their names.
bool isVecALU(const MachineInstr &MI) const
const MachineInstrBuilder & add(const MachineOperand &MO) const
bool DefinesPredicate(MachineInstr &MI, std::vector< MachineOperand > &Pred) const override
If the specified instruction defines any predicate or condition code register(s) used for predication...
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:636
short changeAddrMode_rr_io(short Opc) const
bool modifiesRegister(unsigned Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register...
instr_iterator instr_begin()
const int Hexagon_MEMH_OFFSET_MAX
bool is_TC2early(unsigned SchedClass)
bool isCall(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:633
unsigned nonDbgBBSize(const MachineBasicBlock *BB) const
getInstrTimingClassLatency - Compute the instruction latency of a given instruction using Timing Clas...
instr_iterator instr_end()
MachineBasicBlock * getMBB() const
const int Hexagon_ADDI_OFFSET_MAX
unsigned getFrameRegister(const MachineFunction &MF) const override
unsigned getRegState(const MachineOperand &RegOp)
Get all register state flags from machine operand RegOp.
const int Hexagon_MEMH_OFFSET_MIN
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:140
This class represents lattice values for constants.
Definition: AllocatorList.h:24
const InstrStage * beginStage(unsigned ItinClassIndx) const
Return the first stage of the itinerary.
static void parseOperands(const MachineInstr &MI, SmallVector< unsigned, 4 > &Defs, SmallVector< unsigned, 8 > &Uses)
Gather register def/uses from MI.
static cl::opt< bool > UseDFAHazardRec("dfa-hazard-rec", cl::init(true), cl::Hidden, cl::ZeroOrMore, cl::desc("Use the DFA based hazard recognizer."))
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Store the specified register of the given register class to the specified stack frame index...
void setBundleNoShuf(MachineBasicBlock::instr_iterator MIB) const
short getEquivalentHWInstr(const MachineInstr &MI) const
DFAPacketizer * CreateTargetScheduleState(const TargetSubtargetInfo &STI) const override
Create machine specific model for scheduling.
void push_back(const T &Elt)
Definition: SmallVector.h:218
bool isAbsoluteSet(const MachineInstr &MI) const
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:383
bool isJumpR(const MachineInstr &MI) const
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
bool SubsumesPredicate(ArrayRef< MachineOperand > Pred1, ArrayRef< MachineOperand > Pred2) const override
Returns true if the first specified predicate subsumes the second, e.g.
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:164
unsigned getUnits() const
Returns the choice of FUs.
bool isConstExtended(const MachineInstr &MI) const
unsigned getReg() const
getReg - Returns the register number.
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
Definition: StringRef.h:138
bool getInvertedPredSense(SmallVectorImpl< MachineOperand > &Cond) const
Address of indexed Jump Table for switch.
unsigned Reg
unsigned nonDbgBundleSize(MachineBasicBlock::const_iterator BundleHead) const
unsigned getSubReg() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:253
bool isInlineAsm() const
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
Reverses the branch condition of the specified condition list, returning false on success and true if...
int getMaxValue(const MachineInstr &MI) const
ArrayRef< std::pair< unsigned, const char * > > getSerializableBitmaskMachineOperandTargetFlags() const override
Return an array that contains the bitmask target flag values and their names.
const int Hexagon_ADDI_OFFSET_MIN
bool reversePredSense(MachineInstr &MI) const
int getDotNewPredOp(const MachineInstr &MI, const MachineBranchProbabilityInfo *MBPI) const
bool isTransient() const
Return true if this is a transient instruction that is either very likely to be eliminated during reg...
demanded bits
MachineBasicBlock reference.
#define HEXAGON_INSTR_SIZE
bool isExpr(unsigned OpType) const
bool isTailCall(const MachineInstr &MI) const override
unsigned const TargetRegisterInfo * TRI
A debug info location.
Definition: DebugLoc.h:34
F(f)
unsigned reduceLoopCount(MachineBasicBlock &MBB, MachineInstr *IndVar, MachineInstr &Cmp, SmallVectorImpl< MachineOperand > &Cond, SmallVectorImpl< MachineInstr *> &PrevInsts, unsigned Iter, unsigned MaxIter) const override
Generate code to reduce the loop iteration by one and check if the loop is finished.
bool isCPI() const
isCPI - Tests if this is a MO_ConstantPoolIndex operand.
bool producesStall(const MachineInstr &ProdMI, const MachineInstr &ConsMI) const
MachineInstrBundleIterator< const MachineInstr > const_iterator
const HexagonFrameLowering * getFrameLowering() const override
unsigned getMemAccessSize(const MachineInstr &MI) const
const int Hexagon_MEMD_OFFSET_MAX
unsigned getSize(const MachineInstr &MI) const
int getDotCurOp(const MachineInstr &MI) const
bool mayLoad() const
Return true if this instruction could possibly read memory.
Definition: MCInstrDesc.h:399
bool isLateResultInstr(const MachineInstr &MI) const
iterator_range< mop_iterator > operands()
Definition: MachineInstr.h:459
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
bool is_TC1(unsigned SchedClass)
void immediateExtend(MachineInstr &MI) const
immediateExtend - Changes the instruction in place to one using an immediate extender.
int getDotNewPredJumpOp(const MachineInstr &MI, const MachineBranchProbabilityInfo *MBPI) const
short changeAddrMode_ur_rr(short Opc) const
bool hasLoadFromStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand *> &Accesses) const override
Check if the instruction or the bundle of instructions has load from stack slots. ...
unsigned getSpillSize(const TargetRegisterClass &RC) const
Return the size in bytes of the stack slot allocated to hold a spilled copy of a register from class ...
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:128
return AArch64::GPR64RegClass contains(Reg)
iterator_range< succ_iterator > successors()
bool isToBeScheduledASAP(const MachineInstr &MI1, const MachineInstr &MI2) const
void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const override
Insert a noop into the instruction stream at the specified point.
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
Analyze the branching code at the end of MBB, returning true if it cannot be understood (e...
bool isHVXVec(const MachineInstr &MI) const
static cl::opt< bool > BranchRelaxAsmLarge("branch-relax-asm-large", cl::init(true), cl::Hidden, cl::ZeroOrMore, cl::desc("branch relax asm"))
bool isComplex(const MachineInstr &MI) const
unsigned getSpillAlignment(const TargetRegisterClass &RC) const
Return the minimum required alignment in bytes for a spill slot for a register of this class...
static cl::opt< bool > DisableNVSchedule("disable-hexagon-nv-schedule", cl::Hidden, cl::ZeroOrMore, cl::init(false), cl::desc("Disable schedule adjustment for new value stores."))
A description of a memory reference used in the backend.
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Load the specified register of the given register class from the specified stack frame index...
unsigned getInvertedPredicatedOpcode(const int Opc) const
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
MCSuperRegIterator enumerates all super-registers of Reg.
ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, const ScheduleDAG *DAG) const override
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
Definition: ArrayRef.h:451
unsigned getNumOperands() const
Retuns the total number of operands.
Definition: MachineInstr.h:412
Printable printReg(unsigned Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
LLVM_NODISCARD size_t count(char C) const
Return the number of occurrences of C in the string.
Definition: StringRef.h:476
unsigned getCompoundOpcode(const MachineInstr &GA, const MachineInstr &GB) const
bool isPredicatedNew(const MachineInstr &MI) const
bool canExecuteInBundle(const MachineInstr &First, const MachineInstr &Second) const
Can these instructions execute at the same time in a bundle.
bool predOpcodeHasNot(ArrayRef< MachineOperand > Cond) const
const HexagonRegisterInfo * getRegisterInfo() const override
MachineBasicBlock * getBottomBlock()
Return the "bottom" block in the loop, which is the last block in the linear layout, ignoring any parts of the loop not contiguous with the part that contains the header.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
bool isVecUsableNextPacket(const MachineInstr &ProdMI, const MachineInstr &ConsMI) const
void eraseFromParent()
Unlink &#39;this&#39; from the containing basic block and delete it.
Name of external global symbol.
static StringRef getName(Value *V)
SimpleValueType SimpleTy
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
bool isDotNewInst(const MachineInstr &MI) const
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:409
bool isDeallocRet(const MachineInstr &MI) const
bool isExtended(const MachineInstr &MI) const
bool hasStoreToStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand *> &Accesses) const override
Check if the instruction or the bundle of instructions has store to stack slots.
const char * getSymbolName() const
bool is_TC2(unsigned SchedClass)
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:667
bool isSolo(const MachineInstr &MI) const
bool isLateInstrFeedsEarlyInstr(const MachineInstr &LRMI, const MachineInstr &ESMI) const
MachineBasicBlock::instr_iterator expandVGatherPseudo(MachineInstr &MI) const
bool expandPostRAPseudo(MachineInstr &MI) const override
This function is called for all pseudo instructions that remain after register allocation.
bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Cond) const override
Convert the instruction into a predicated instruction.
bool predCanBeUsedAsDotNew(const MachineInstr &MI, unsigned PredReg) const
MachineInstr * findLoopInstr(MachineBasicBlock *BB, unsigned EndLoopOp, MachineBasicBlock *TargetBB, SmallPtrSet< MachineBasicBlock *, 8 > &Visited) const
Find the hardware loop instruction used to set-up the specified loop.
SmallVector< MachineInstr *, 2 > getBranchingInstrs(MachineBasicBlock &MBB) const
bool doesNotReturn(const MachineInstr &CallMI) const
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:292
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
Definition: MachineInstr.h:406
bool isIndirectBranch(QueryType Type=AnyInBundle) const
Return true if this is an indirect branch, such as a branch through a register.
Definition: MachineInstr.h:663
bool isEndLoopN(unsigned Opcode) const
bool isBundle() const
bool isCompoundBranchInstr(const MachineInstr &MI) const
void clearKillFlags(unsigned Reg) const
clearKillFlags - Iterate over all the uses of the given register and clear the kill flag from the Mac...
MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the sy...
short changeAddrMode_io_rr(short Opc) const
bool isPredictedTaken(unsigned Opcode) const
int getMinValue(const MachineInstr &MI) const
bool getBundleNoShuf(const MachineInstr &MIB) const
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:33
bool isFPImm() const
isFPImm - Tests if this is a MO_FPImmediate operand.
Itinerary data supplied by a subtarget to be used by a target.
short changeAddrMode_io_pi(short Opc) const
bool isTC1(const MachineInstr &MI) const
unsigned getUndefRegState(bool B)
unsigned getInstrLatency(const InstrItineraryData *ItinData, const MachineInstr &MI, unsigned *PredCost=nullptr) const override
Compute the instruction latency of a given instruction.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
unsigned getStageLatency(unsigned ItinClassIndx) const
Return the total stage latency of the given class.
short changeAddrMode_rr_ur(short Opc) const
reverse_iterator rbegin()
bool isBranch(QueryType Type=AnyInBundle) const
Returns true if this is a conditional, unconditional, or indirect branch.
Definition: MachineInstr.h:657
bool getPredReg(ArrayRef< MachineOperand > Cond, unsigned &PredReg, unsigned &PredRegPos, unsigned &PredRegFlags) const
BasicBlockListType::iterator iterator
unsigned getKillRegState(bool B)
virtual const InstrItineraryData * getInstrItineraryData() const
getInstrItineraryData - Returns instruction itinerary data for the target or specific subtarget...
unsigned getCExtOpNum(const MachineInstr &MI) const
const int Hexagon_MEMD_OFFSET_MIN
MachineOperand * getBaseAndOffset(const MachineInstr &MI, int64_t &Offset, unsigned &AccessSize) const
bool isReturn(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:623
bool is_TC3x(unsigned SchedClass)
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition: MCAsmInfo.h:56
static cl::opt< bool > EnableBranchPrediction("hexagon-enable-branch-prediction", cl::Hidden, cl::init(true), cl::desc("Enable branch prediction"))
bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, BranchProbability Probability) const override
Return true if it&#39;s profitable for if-converter to duplicate instructions of specified accumulated in...
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
unsigned getSchedClass() const
Return the scheduling class for this instruction.
Definition: MCInstrDesc.h:577
unsigned getObjectAlignment(int ObjectIdx) const
Return the alignment of the specified stack object.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
Definition: MachineInstr.h:820
Address of a global value.
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:423
const int Hexagon_MEMW_OFFSET_MAX
Constants for Hexagon instructions.
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
unsigned const MachineRegisterInfo * MRI
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:429
Machine Value Type.
static cl::opt< bool > EnableALUForwarding("enable-alu-forwarding", cl::Hidden, cl::init(true), cl::desc("Enable vec alu forwarding"))
HazardRecognizer - This determines whether or not an instruction can be issued this cycle...
bool getIncrementValue(const MachineInstr &MI, int &Value) const override
If the instruction is an increment of a constant value, return the amount.
bool isCompare(QueryType Type=IgnoreBundle) const
Return true if this instruction is a comparison.
Definition: MachineInstr.h:694
bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg, unsigned &SrcReg2, int &Mask, int &Value) const override
For a comparison instruction, return the source registers in SrcReg and SrcReg2 if having two registe...
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
bool validateBranchCond(const ArrayRef< MachineOperand > &Cond) const
MachineInstrBuilder & UseMI
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:149
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
const char * getSeparatorString() const
Definition: MCAsmInfo.h:480
bool isPredicable(const MachineInstr &MI) const override
Return true if the specified instruction can be predicated.
void addLiveOuts(const MachineBasicBlock &MBB)
Adds all live-out registers of basic block MBB.
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator begin()
Definition: SmallVector.h:129
DebugLoc findDebugLoc(instr_iterator MBBI)
Find the next valid DebugLoc starting at MBBI, skipping any DBG_VALUE and DBG_LABEL instructions...
bool isAccumulator(const MachineInstr &MI) const
bool getBaseAndOffsetPosition(const MachineInstr &MI, unsigned &BasePos, unsigned &OffsetPos) const override
For instructions with a base and offset, return the position of the base register and offset operands...
bool isZeroExtendingLoad(const MachineInstr &MI) const
short changeAddrMode_io_abs(short Opc) const
bool hasUncondBranch(const MachineBasicBlock *B) const
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:371
short changeAddrMode_abs_io(short Opc) const
unsigned getAddrMode(const MachineInstr &MI) const
int getNonDotCurOp(const MachineInstr &MI) const
bool invertAndChangeJumpTarget(MachineInstr &MI, MachineBasicBlock *NewTarget) const
void setMBB(MachineBasicBlock *MBB)
bool isTC2Early(const MachineInstr &MI) const
void stepBackward(const MachineInstr &MI)
Simulates liveness when stepping backwards over an instruction(bundle).
Address of a basic block.
static bool isDblRegForSubInst(unsigned Reg, const HexagonRegisterInfo &HRI)
bool isValidAutoIncImm(const EVT VT, const int Offset) const
StringRef getCommentString() const
Definition: MCAsmInfo.h:486
bool isFloat(const MachineInstr &MI) const
void setImm(int64_t immVal)
self_iterator getIterator()
Definition: ilist_node.h:82
bool isPredicable() const
Return true if this instruction has a predicate operand that controls execution.
Definition: MCInstrDesc.h:308
iterator_range< pred_iterator > predecessors()
HexagonII::SubInstructionGroup getDuplexCandidateGroup(const MachineInstr &MI) const
void genAllInsnTimingClasses(MachineFunction &MF) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
Extended Value Type.
Definition: ValueTypes.h:34
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
short getPseudoInstrPair(const MachineInstr &MI) const
MCSubRegIterator enumerates all sub-registers of Reg.
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
bool isEarlySourceInstr(const MachineInstr &MI) const
bool isPostIncrement(const MachineInstr &MI) const override
Return true for post-incremented instructions.
bool isDebugInstr() const
Definition: MachineInstr.h:999
bool isExtendable(const MachineInstr &MI) const
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
MO_LO16 - On a symbol operand, this represents a relocation containing lower 16 bit of the address...
Definition: ARMBaseInfo.h:241
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
static cl::opt< bool > EnableACCForwarding("enable-acc-forwarding", cl::Hidden, cl::init(true), cl::desc("Enable vec acc forwarding"))
bool isPredicated(const MachineInstr &MI) const override
Returns true if the instruction is already predicated.
void setIsKill(bool Val=true)
bool hasPseudoInstrPair(const MachineInstr &MI) const
The memory access writes data.
bool isIndirectCall(const MachineInstr &MI) const
bool isTC4x(const MachineInstr &MI) const
bool isDotCurInst(const MachineInstr &MI) const
static bool isUndef(ArrayRef< int > Mask)
static bool isIntRegForSubInst(unsigned Reg)
bool isConditionalBranch(QueryType Type=AnyInBundle) const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
Definition: MachineInstr.h:671
virtual bool hasLoadFromStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand *> &Accesses) const
If the specified machine instruction has a load from a stack slot, return true along with the FrameIn...
Iterator for intrusive lists based on ilist_node.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:418
bool isJTI() const
isJTI - Tests if this is a MO_JumpTableIndex operand.
void setDesc(const MCInstrDesc &tid)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one...
bool isNewValueStore(const MachineInstr &MI) const
HexagonInstrInfo(HexagonSubtarget &ST)
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
Decompose the machine operand&#39;s target flags into two values - the direct target flag value and any o...
bool isNewValueInst(const MachineInstr &MI) const
bool available(const MachineRegisterInfo &MRI, MCPhysReg Reg) const
Returns true if register Reg and no aliasing register is in the set.
MachineOperand class - Representation of each machine instruction operand.
bool isVecAcc(const MachineInstr &MI) const
MachineInstrBuilder MachineInstrBuilder & DefMI
bool hasEHLabel(const MachineBasicBlock *B) const
bool isJumpWithinBranchRange(const MachineInstr &MI, unsigned offset) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
unsigned getUnits(const MachineInstr &MI) const
short getRegForm(const MachineInstr &MI) const
unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
If the specified machine instruction is a direct store to a stack slot, return the virtual or physica...
static void getLiveRegsAt(LivePhysRegs &Regs, const MachineInstr &MI)
MO_PCREL - On a symbol operand, indicates a PC-relative relocation Used for computing a global addres...
uint64_t getType(const MachineInstr &MI) const
bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
Insert branch code into the end of the specified MachineBasicBlock.
bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, BranchProbability Probability) const override
Return true if it&#39;s profitable to predicate instructions with accumulated instruction latency of "Num...
bool isSignExtendingLoad(const MachineInstr &MI) const
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:226
bool isOperandExtended(const MachineInstr &MI, unsigned OperandNum) const
unsigned getInlineAsmLength(const char *Str, const MCAsmInfo &MAI) const override
Measure the specified inline asm to determine an approximation of its length.
int64_t getImm() const
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
bool isDuplexPair(const MachineInstr &MIa, const MachineInstr &MIb) const
Symmetrical. See if these two instructions are fit for duplex pair.
bool isBlockAddress() const
isBlockAddress - Tests if this is a MO_BlockAddress operand.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:133
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:539
bool isLateSourceInstr(const MachineInstr &MI) const
bool isSpillPredRegOp(const MachineInstr &MI) const
bool getMemOperandWithOffset(MachineInstr &LdSt, MachineOperand *&BaseOp, int64_t &Offset, const TargetRegisterInfo *TRI) const override
Get the base register and byte offset of a load/store instr.
unsigned getMaxInstLength() const
Definition: MCAsmInfo.h:477
static bool isDuplexPairMatch(unsigned Ga, unsigned Gb)
unsigned const TypeCVI_FIRST
unsigned createVR(MachineFunction *MF, MVT VT) const
HexagonInstrInfo specifics.
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
Test if the given instruction should be considered a scheduling boundary.
unsigned reversePrediction(unsigned Opcode) const
bool isValid() const
isValid - returns true if this iterator is not yet at the end.
bool isNewValue(const MachineInstr &MI) const
int findRegisterDefOperandIdx(unsigned Reg, bool isDead=false, bool Overlap=false, const TargetRegisterInfo *TRI=nullptr) const
Returns the operand index that is a def of the specified register or -1 if it is not found...
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
bool isSaveCalleeSavedRegsCall(const MachineInstr &MI) const
bool readsRegister(unsigned Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr reads the specified register.
static LLVM_ATTRIBUTE_UNUSED unsigned getMemAccessSizeInBytes(MemAccessSize S)
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
Remove the branching code at the end of the specific MBB.
bool mayStore() const
Return true if this instruction could possibly modify memory.
Definition: MCInstrDesc.h:405
unsigned const TypeCVI_LAST
unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
TargetInstrInfo overrides.
unsigned succ_size() const
int getDotNewOp(const MachineInstr &MI) const
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:254
int getCondOpcode(int Opc, bool sense) const
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, bool KillSrc) const override
Emit instructions to copy a pair of physical registers.
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
TargetSubtargetInfo - Generic base class for all target subtargets.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
BranchProbability getEdgeProbability(const MachineBasicBlock *Src, const MachineBasicBlock *Dst) const
bool isPredicateLate(unsigned Opcode) const
Representation of each machine instruction.
Definition: MachineInstr.h:64
bool addLatencyToSchedule(const MachineInstr &MI1, const MachineInstr &MI2) const
static bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
bool isDependent(const MachineInstr &ProdMI, const MachineInstr &ConsMI) const
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator end()
Definition: SmallVector.h:133
void addTargetFlag(unsigned F)
static unsigned nonDbgMICount(MachineBasicBlock::const_instr_iterator MIB, MachineBasicBlock::const_instr_iterator MIE)
Calculate number of instructions excluding the debug instructions.
virtual bool hasStoreToStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand *> &Accesses) const
If the specified machine instruction has a store to a stack slot, return true along with the FrameInd...
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const int Hexagon_MEMB_OFFSET_MAX
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
A set of physical registers with utility functions to track liveness when walking backward/forward th...
Definition: LivePhysRegs.h:49
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:56
These values represent a non-pipelined step in the execution of an instruction.
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:465
int getDotOldOp(const MachineInstr &MI) const
int64_t getOffset() const
Return the offset from the symbol in this operand.
bool isValidOffset(unsigned Opcode, int Offset, const TargetRegisterInfo *TRI, bool Extend=true) const
static MachineOperand CreateImm(int64_t Val)
bool mayBeNewStore(const MachineInstr &MI) const
#define I(x, y, z)
Definition: MD5.cpp:58
#define N
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
bool isPredicatedTrue(const MachineInstr &MI) const
uint32_t Size
Definition: Profile.cpp:47
const int Hexagon_MEMW_OFFSET_MIN
bool isNewValueJump(const MachineInstr &MI) const
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool hasNonExtEquivalent(const MachineInstr &MI) const
unsigned getInstrTimingClassLatency(const InstrItineraryData *ItinData, const MachineInstr &MI) const
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
int getOperandLatency(const InstrItineraryData *ItinData, const MachineInstr &DefMI, unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const override
getOperandLatency - Compute and return the use operand latency of a given pair of def and use...
bool isSymbol() const
isSymbol - Tests if this is a MO_ExternalSymbol operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Instructions::iterator instr_iterator
const std::string to_string(const T &Value)
Definition: ScopedPrinter.h:62
bool isTC2(const MachineInstr &MI) const
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
Definition: MachineInstr.h:807
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isBranch(unsigned Opcode)
bool is_TC4x(unsigned SchedClass)
bool isLoopN(const MachineInstr &MI) const
bool isHVXMemWithAIndirect(const MachineInstr &I, const MachineInstr &J) const
bool isTerminator() const
Returns true if this instruction part of the terminator for a basic block.
Definition: MCInstrDesc.h:271
LLVM Value Representation.
Definition: Value.h:73
bool PredOpcodeHasJMP_c(unsigned Opcode) const
bool isPosition() const
Definition: MachineInstr.h:995
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E&#39;s largest value.
Definition: BitmaskEnum.h:81
bool mayBeCurLoad(const MachineInstr &MI) const
bool isAddrModeWithOffset(const MachineInstr &MI) const
unsigned getOpcode() const
Return the opcode number for this descriptor.
Definition: MCInstrDesc.h:204
static cl::opt< bool > EnableTimingClassLatency("enable-timing-class-latency", cl::Hidden, cl::init(false), cl::desc("Enable timing class latency"))
short changeAddrMode_pi_io(short Opc) const
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore...
IRTranslator LLVM IR MI
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:49
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
void RemoveOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with...
Address of indexed Constant in Constant Pool.
bool isMemOp(const MachineInstr &MI) const
MachineBasicBlock::instr_iterator getBundleEnd(MachineBasicBlock::instr_iterator I)
Returns an iterator pointing beyond the bundle containing I.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
#define LLVM_DEBUG(X)
Definition: Debug.h:123
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:414
bool isBaseImmOffset(const MachineInstr &MI) const
bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst, MachineInstr *&CmpInst) const override
Analyze the loop code, return true if it cannot be understood.
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
int findRegisterUseOperandIdx(unsigned Reg, bool isKill=false, const TargetRegisterInfo *TRI=nullptr) const
Returns the operand index that is a use of the specific register or -1 if it is not found...
Instructions::const_iterator const_instr_iterator
virtual int getOperandLatency(const InstrItineraryData *ItinData, SDNode *DefNode, unsigned DefIdx, SDNode *UseNode, unsigned UseIdx) const
bool isIndirectL4Return(const MachineInstr &MI) const
bool areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA=nullptr) const override
unsigned createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
MO_HI16 - On a symbol operand, this represents a relocation containing higher 16 bit of the address...
Definition: ARMBaseInfo.h:245
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:144
const int Hexagon_MEMB_OFFSET_MIN
cl::opt< bool > ScheduleInlineAsm("hexagon-sched-inline-asm", cl::Hidden, cl::init(false), cl::desc("Do not consider inline-asm a scheduling/" "packetization boundary."))
unsigned getSize() const
Return the number of bytes in the encoding of this instruction, or zero if the encoding size cannot b...
Definition: MCInstrDesc.h:581
bool isImplicit() const
HexagonII::CompoundGroup getCompoundCandidateGroup(const MachineInstr &MI) const