HexagonMachineScheduler.cpp (LLVM 8.0.1)
1 //===- HexagonMachineScheduler.cpp - MI Scheduler for Hexagon -------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // MachineScheduler schedules machine instructions after phi elimination. It
11 // preserves LiveIntervals so it can be invoked before register allocation.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "HexagonMachineScheduler.h"
16 #include "HexagonInstrInfo.h"
17 #include "HexagonSubtarget.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/CodeGen/DFAPacketizer.h"
20 #include "llvm/CodeGen/MachineBasicBlock.h"
21 #include "llvm/CodeGen/MachineFunction.h"
22 #include "llvm/CodeGen/MachineInstr.h"
23 #include "llvm/CodeGen/MachineLoopInfo.h"
24 #include "llvm/CodeGen/RegisterClassInfo.h"
25 #include "llvm/CodeGen/RegisterPressure.h"
26 #include "llvm/CodeGen/ScheduleDAG.h"
27 #include "llvm/CodeGen/ScheduleHazardRecognizer.h"
28 #include "llvm/CodeGen/TargetInstrInfo.h"
29 #include "llvm/CodeGen/TargetOpcodes.h"
30 #include "llvm/CodeGen/TargetRegisterInfo.h"
31 #include "llvm/CodeGen/TargetSchedule.h"
32 #include "llvm/CodeGen/TargetSubtargetInfo.h"
33 #include "llvm/IR/Function.h"
34 #include "llvm/Support/CommandLine.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/raw_ostream.h"
37 #include <algorithm>
38 #include <cassert>
39 #include <iomanip>
40 #include <limits>
41 #include <memory>
42 #include <sstream>
43 
44 using namespace llvm;
45 
46 #define DEBUG_TYPE "machine-scheduler"
47 
48 static cl::opt<bool> IgnoreBBRegPressure("ignore-bb-reg-pressure",
49  cl::Hidden, cl::ZeroOrMore, cl::init(false));
50 
51 static cl::opt<bool> UseNewerCandidate("use-newer-candidate",
52  cl::Hidden, cl::ZeroOrMore, cl::init(true));
53 
54 static cl::opt<unsigned> SchedDebugVerboseLevel("misched-verbose-level",
55  cl::Hidden, cl::ZeroOrMore, cl::init(1));
56 
57 // Check if the scheduler should penalize instructions that are available too
58 // early due to a zero-latency dependence.
59 static cl::opt<bool> CheckEarlyAvail("check-early-avail", cl::Hidden,
60  cl::ZeroOrMore, cl::init(true));
61 
62 // This value is used to determine if a register class is a high pressure set.
63 // We compute the maximum number of registers needed and divide by the total
64 // available. Then, we compare the result to this value.
65 static cl::opt<float> RPThreshold("hexagon-reg-pressure", cl::Hidden,
66  cl::init(0.75f), cl::desc("High register pressure threshold."));
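// Illustrative reading of the threshold (assuming the 0.75 default): a
// pressure set whose limit is, say, 32 registers is treated as high pressure
// once the maximum pressure recorded for the region exceeds 24, since the
// check in ConvergingVLIWScheduler::initialize() below is
// MaxPressure[i] > Limit * RPThreshold.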
67 
68 /// Return true if there is a dependence between SUd and SUu.
69 static bool hasDependence(const SUnit *SUd, const SUnit *SUu,
70  const HexagonInstrInfo &QII) {
71  if (SUd->Succs.size() == 0)
72  return false;
73 
74  // Enable .cur formation.
75  if (QII.mayBeCurLoad(*SUd->getInstr()))
76  return false;
77 
78  if (QII.canExecuteInBundle(*SUd->getInstr(), *SUu->getInstr()))
79  return false;
80 
81  for (const auto &S : SUd->Succs) {
82  // Since we do not add pseudos to packets, might as well
83  // ignore order dependencies.
84  if (S.isCtrl())
85  continue;
86 
87  if (S.getSUnit() == SUu && S.getLatency() > 0)
88  return true;
89  }
90  return false;
91 }
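// Note on the dependence test above: only data edges with non-zero latency
// between SUd and SUu count as a dependence here. Order (control) edges and
// zero-latency edges are deliberately ignored, and potential .cur loads are
// exempted entirely, so such pairs can still end up in the same packet.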
92 
93 /// Check if scheduling of this SU is possible
94 /// in the current packet.
95 /// It is _not_ precise (stateful), it is more like
96 /// another heuristic. Many corner cases are figured
97 /// empirically.
98 bool VLIWResourceModel::isResourceAvailable(SUnit *SU, bool IsTop) {
99  if (!SU || !SU->getInstr())
100  return false;
101 
102  // First see if the pipeline could receive this instruction
103  // in the current cycle.
104  switch (SU->getInstr()->getOpcode()) {
105  default:
106  if (!ResourcesModel->canReserveResources(*SU->getInstr()))
107  return false;
108  break;
109  case TargetOpcode::EXTRACT_SUBREG:
110  case TargetOpcode::INSERT_SUBREG:
111  case TargetOpcode::SUBREG_TO_REG:
112  case TargetOpcode::REG_SEQUENCE:
113  case TargetOpcode::IMPLICIT_DEF:
114  case TargetOpcode::COPY:
115  case TargetOpcode::INLINEASM:
116  break;
117  }
118 
119  MachineBasicBlock *MBB = SU->getInstr()->getParent();
120  auto &QST = MBB->getParent()->getSubtarget<HexagonSubtarget>();
121  const auto &QII = *QST.getInstrInfo();
122 
123  // Now see if there are no other dependencies to instructions already
124  // in the packet.
125  if (IsTop) {
126  for (unsigned i = 0, e = Packet.size(); i != e; ++i)
127  if (hasDependence(Packet[i], SU, QII))
128  return false;
129  } else {
130  for (unsigned i = 0, e = Packet.size(); i != e; ++i)
131  if (hasDependence(SU, Packet[i], QII))
132  return false;
133  }
134  return true;
135 }
136 
137 /// Keep track of available resources.
138 bool VLIWResourceModel::reserveResources(SUnit *SU, bool IsTop) {
139  bool startNewCycle = false;
140  // Artificially reset state.
141  if (!SU) {
142  ResourcesModel->clearResources();
143  Packet.clear();
144  TotalPackets++;
145  return false;
146  }
147  // If this SU does not fit in the packet or the packet is now full,
148  // start a new one.
149  if (!isResourceAvailable(SU, IsTop) ||
150  Packet.size() >= SchedModel->getIssueWidth()) {
151  ResourcesModel->clearResources();
152  Packet.clear();
153  TotalPackets++;
154  startNewCycle = true;
155  }
156 
157  switch (SU->getInstr()->getOpcode()) {
158  default:
159  ResourcesModel->reserveResources(*SU->getInstr());
160  break;
161  case TargetOpcode::EXTRACT_SUBREG:
162  case TargetOpcode::INSERT_SUBREG:
163  case TargetOpcode::SUBREG_TO_REG:
164  case TargetOpcode::REG_SEQUENCE:
165  case TargetOpcode::IMPLICIT_DEF:
166  case TargetOpcode::KILL:
167  case TargetOpcode::CFI_INSTRUCTION:
168  case TargetOpcode::EH_LABEL:
169  case TargetOpcode::COPY:
170  case TargetOpcode::INLINEASM:
171  break;
172  }
173  Packet.push_back(SU);
174 
175 #ifndef NDEBUG
176  LLVM_DEBUG(dbgs() << "Packet[" << TotalPackets << "]:\n");
177  for (unsigned i = 0, e = Packet.size(); i != e; ++i) {
178  LLVM_DEBUG(dbgs() << "\t[" << i << "] SU(");
179  LLVM_DEBUG(dbgs() << Packet[i]->NodeNum << ")\t");
180  LLVM_DEBUG(Packet[i]->getInstr()->dump());
181  }
182 #endif
183 
184  return startNewCycle;
185 }
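// Rough walk-through of reserveResources(), assuming an issue width of 4:
// the first four SUnits that fit the DFA and have no latency-carrying
// dependence on a packet member are collected into Packet[0..3]; the fifth
// call then fails isResourceAvailable() or trips the
// Packet.size() >= IssueWidth check, so the packet is flushed, TotalPackets
// is incremented, and the returned startNewCycle tells bumpNode() to call
// bumpCycle().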
186 
187 /// schedule - Called back from MachineScheduler::runOnMachineFunction
188 /// after setting up the current scheduling region. [RegionBegin, RegionEnd)
189 /// only includes instructions that have DAG nodes, not scheduling boundaries.
190 void VLIWMachineScheduler::schedule() {
191  LLVM_DEBUG(dbgs() << "********** MI Converging Scheduling VLIW "
192  << printMBBReference(*BB) << " " << BB->getName()
193  << " in_func " << BB->getParent()->getName()
194  << " at loop depth " << MLI->getLoopDepth(BB) << " \n");
195 
196  buildDAGWithRegPressure();
197 
198  Topo.InitDAGTopologicalSorting();
199 
200  // Postprocess the DAG to add platform-specific artificial dependencies.
201  postprocessDAG();
202 
203  SmallVector<SUnit*, 8> TopRoots, BotRoots;
204  findRootsAndBiasEdges(TopRoots, BotRoots);
205 
206  // Initialize the strategy before modifying the DAG.
207  SchedImpl->initialize(this);
208 
209  LLVM_DEBUG(unsigned maxH = 0;
210  for (unsigned su = 0, e = SUnits.size(); su != e;
211  ++su) if (SUnits[su].getHeight() > maxH) maxH =
212  SUnits[su].getHeight();
213  dbgs() << "Max Height " << maxH << "\n";);
214  LLVM_DEBUG(unsigned maxD = 0;
215  for (unsigned su = 0, e = SUnits.size(); su != e;
216  ++su) if (SUnits[su].getDepth() > maxD) maxD =
217  SUnits[su].getDepth();
218  dbgs() << "Max Depth " << maxD << "\n";);
219  LLVM_DEBUG(dump());
220 
221  initQueues(TopRoots, BotRoots);
222 
223  bool IsTopNode = false;
224  while (true) {
225  LLVM_DEBUG(
226  dbgs() << "** VLIWMachineScheduler::schedule picking next node\n");
227  SUnit *SU = SchedImpl->pickNode(IsTopNode);
228  if (!SU) break;
229 
230  if (!checkSchedLimit())
231  break;
232 
233  scheduleMI(SU, IsTopNode);
234 
235  // Notify the scheduling strategy after updating the DAG.
236  SchedImpl->schedNode(SU, IsTopNode);
237 
238  updateQueues(SU, IsTopNode);
239  }
240  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
241 
242  placeDebugValues();
243 
244  LLVM_DEBUG({
245  dbgs() << "*** Final schedule for "
246  << printMBBReference(*begin()->getParent()) << " ***\n";
247  dumpSchedule();
248  dbgs() << '\n';
249  });
250 }
251 
252 void ConvergingVLIWScheduler::initialize(ScheduleDAGMI *dag) {
253  DAG = static_cast<VLIWMachineScheduler*>(dag);
254  SchedModel = DAG->getSchedModel();
255 
256  Top.init(DAG, SchedModel);
257  Bot.init(DAG, SchedModel);
258 
259  // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
260  // are disabled, then these HazardRecs will be disabled.
261  const InstrItineraryData *Itin = DAG->getSchedModel()->getInstrItineraries();
262  const TargetSubtargetInfo &STI = DAG->MF.getSubtarget();
263  const TargetInstrInfo *TII = STI.getInstrInfo();
264  delete Top.HazardRec;
265  delete Bot.HazardRec;
266  Top.HazardRec = TII->CreateTargetMIHazardRecognizer(Itin, DAG);
267  Bot.HazardRec = TII->CreateTargetMIHazardRecognizer(Itin, DAG);
268 
269  delete Top.ResourceModel;
270  delete Bot.ResourceModel;
271  Top.ResourceModel = new VLIWResourceModel(STI, DAG->getSchedModel());
272  Bot.ResourceModel = new VLIWResourceModel(STI, DAG->getSchedModel());
273 
274  const std::vector<unsigned> &MaxPressure =
275  DAG->getRegPressure().MaxSetPressure;
276  HighPressureSets.assign(MaxPressure.size(), 0);
277  for (unsigned i = 0, e = MaxPressure.size(); i < e; ++i) {
278  unsigned Limit = DAG->getRegClassInfo()->getRegPressureSetLimit(i);
279  HighPressureSets[i] =
280  ((float) MaxPressure[i] > ((float) Limit * RPThreshold));
281  }
282 
283  assert((!ForceTopDown || !ForceBottomUp) &&
284  "-misched-topdown incompatible with -misched-bottomup");
285 }
286 
287 void ConvergingVLIWScheduler::releaseTopNode(SUnit *SU) {
288  if (SU->isScheduled)
289  return;
290 
291  for (const SDep &PI : SU->Preds) {
292  unsigned PredReadyCycle = PI.getSUnit()->TopReadyCycle;
293  unsigned MinLatency = PI.getLatency();
294 #ifndef NDEBUG
295  Top.MaxMinLatency = std::max(MinLatency, Top.MaxMinLatency);
296 #endif
297  if (SU->TopReadyCycle < PredReadyCycle + MinLatency)
298  SU->TopReadyCycle = PredReadyCycle + MinLatency;
299  }
300  Top.releaseNode(SU, SU->TopReadyCycle);
301 }
302 
303 void ConvergingVLIWScheduler::releaseBottomNode(SUnit *SU) {
304  if (SU->isScheduled)
305  return;
306 
307  assert(SU->getInstr() && "Scheduled SUnit must have instr");
308 
309  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
310  I != E; ++I) {
311  unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
312  unsigned MinLatency = I->getLatency();
313 #ifndef NDEBUG
314  Bot.MaxMinLatency = std::max(MinLatency, Bot.MaxMinLatency);
315 #endif
316  if (SU->BotReadyCycle < SuccReadyCycle + MinLatency)
317  SU->BotReadyCycle = SuccReadyCycle + MinLatency;
318  }
319  Bot.releaseNode(SU, SU->BotReadyCycle);
320 }
321 
322 /// Does this SU have a hazard within the current instruction group.
323 ///
324 /// The scheduler supports two modes of hazard recognition. The first is the
325 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
326 /// supports highly complicated in-order reservation tables
327 /// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
328 ///
329 /// The second is a streamlined mechanism that checks for hazards based on
330 /// simple counters that the scheduler itself maintains. It explicitly checks
331 /// for instruction dispatch limitations, including the number of micro-ops that
332 /// can dispatch per cycle.
333 ///
334 /// TODO: Also check whether the SU must start a new group.
335 bool ConvergingVLIWScheduler::VLIWSchedBoundary::checkHazard(SUnit *SU) {
336  if (HazardRec->isEnabled())
337  return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard;
338 
339  unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
340  if (IssueCount + uops > SchedModel->getIssueWidth())
341  return true;
342 
343  return false;
344 }
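// For example, with itineraries disabled and an issue width of 4, a boundary
// that has already issued 4 micro-ops in the current cycle reports a hazard
// for any further SUnit, so releaseNode() keeps that node in the Pending
// queue until the cycle is advanced.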
345 
346 void ConvergingVLIWScheduler::VLIWSchedBoundary::releaseNode(SUnit *SU,
347  unsigned ReadyCycle) {
348  if (ReadyCycle < MinReadyCycle)
349  MinReadyCycle = ReadyCycle;
350 
351  // Check for interlocks first. For the purpose of other heuristics, an
352  // instruction that cannot issue appears as if it's not in the ReadyQueue.
353  if (ReadyCycle > CurrCycle || checkHazard(SU))
354 
355  Pending.push(SU);
356  else
357  Available.push(SU);
358 }
359 
360 /// Move the boundary of scheduled code by one cycle.
361 void ConvergingVLIWScheduler::VLIWSchedBoundary::bumpCycle() {
362  unsigned Width = SchedModel->getIssueWidth();
363  IssueCount = (IssueCount <= Width) ? 0 : IssueCount - Width;
364 
365  assert(MinReadyCycle < std::numeric_limits<unsigned>::max() &&
366  "MinReadyCycle uninitialized");
367  unsigned NextCycle = std::max(CurrCycle + 1, MinReadyCycle);
368 
369  if (!HazardRec->isEnabled()) {
370  // Bypass HazardRec virtual calls.
371  CurrCycle = NextCycle;
372  } else {
373  // Bypass getHazardType calls in case of long latency.
374  for (; CurrCycle != NextCycle; ++CurrCycle) {
375  if (isTop())
376  HazardRec->AdvanceCycle();
377  else
378  HazardRec->RecedeCycle();
379  }
380  }
381  CheckPending = true;
382 
383  LLVM_DEBUG(dbgs() << "*** Next cycle " << Available.getName() << " cycle "
384  << CurrCycle << '\n');
385 }
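// Example of the micro-op carry-over at the top of bumpCycle(): with an
// issue width of 4, an IssueCount of 6 becomes 2 in the new cycle (6 - 4),
// while any count of 4 or less simply resets to 0.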
386 
387 /// Move the boundary of scheduled code by one SUnit.
388 void ConvergingVLIWScheduler::VLIWSchedBoundary::bumpNode(SUnit *SU) {
389  bool startNewCycle = false;
390 
391  // Update the reservation table.
392  if (HazardRec->isEnabled()) {
393  if (!isTop() && SU->isCall) {
394  // Calls are scheduled with their preceding instructions. For bottom-up
395  // scheduling, clear the pipeline state before emitting.
396  HazardRec->Reset();
397  }
398  HazardRec->EmitInstruction(SU);
399  }
400 
401  // Update DFA model.
402  startNewCycle = ResourceModel->reserveResources(SU, isTop());
403 
404  // Check the instruction group dispatch limit.
405  // TODO: Check if this SU must end a dispatch group.
406  IssueCount += SchedModel->getNumMicroOps(SU->getInstr());
407  if (startNewCycle) {
408  LLVM_DEBUG(dbgs() << "*** Max instrs at cycle " << CurrCycle << '\n');
409  bumpCycle();
410  }
411  else
412  LLVM_DEBUG(dbgs() << "*** IssueCount " << IssueCount << " at cycle "
413  << CurrCycle << '\n');
414 }
415 
416 /// Release pending ready nodes into the available queue. This makes them
417 /// visible to heuristics.
418 void ConvergingVLIWScheduler::VLIWSchedBoundary::releasePending() {
419  // If the available queue is empty, it is safe to reset MinReadyCycle.
420  if (Available.empty())
421  MinReadyCycle = std::numeric_limits<unsigned>::max();
422 
423  // Check to see if any of the pending instructions are ready to issue. If
424  // so, add them to the available queue.
425  for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
426  SUnit *SU = *(Pending.begin()+i);
427  unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
428 
429  if (ReadyCycle < MinReadyCycle)
430  MinReadyCycle = ReadyCycle;
431 
432  if (ReadyCycle > CurrCycle)
433  continue;
434 
435  if (checkHazard(SU))
436  continue;
437 
438  Available.push(SU);
439  Pending.remove(Pending.begin()+i);
440  --i; --e;
441  }
442  CheckPending = false;
443 }
444 
445 /// Remove SU from the ready set for this boundary.
446 void ConvergingVLIWScheduler::VLIWSchedBoundary::removeReady(SUnit *SU) {
447  if (Available.isInQueue(SU))
448  Available.remove(Available.find(SU));
449  else {
450  assert(Pending.isInQueue(SU) && "bad ready count");
451  Pending.remove(Pending.find(SU));
452  }
453 }
454 
455 /// If this queue only has one ready candidate, return it. As a side effect,
456 /// advance the cycle until at least one node is ready. If multiple instructions
457 /// are ready, return NULL.
458 SUnit *ConvergingVLIWScheduler::VLIWSchedBoundary::pickOnlyChoice() {
459  if (CheckPending)
460  releasePending();
461 
462  auto AdvanceCycle = [this]() {
463  if (Available.empty())
464  return true;
465  if (Available.size() == 1 && Pending.size() > 0)
466  return !ResourceModel->isResourceAvailable(*Available.begin(), isTop()) ||
467  getWeakLeft(*Available.begin(), isTop()) != 0;
468  return false;
469  };
470  for (unsigned i = 0; AdvanceCycle(); ++i) {
471  assert(i <= (HazardRec->getMaxLookAhead() + MaxMinLatency) &&
472  "permanent hazard"); (void)i;
473  ResourceModel->reserveResources(nullptr, isTop());
474  bumpCycle();
475  releasePending();
476  }
477  if (Available.size() == 1)
478  return *Available.begin();
479  return nullptr;
480 }
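// Note: the loop above may legitimately burn cycles. When the single
// available node cannot be packetized yet (or is still held back by a weak
// edge), the resource model is reset with a null SU, the cycle is bumped,
// and pending nodes are re-examined until either more than one node is
// available or the lone candidate can actually issue.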
481 
482 #ifndef NDEBUG
483 void ConvergingVLIWScheduler::traceCandidate(const char *Label,
484  const ReadyQueue &Q, SUnit *SU, int Cost, PressureChange P) {
485  dbgs() << Label << " " << Q.getName() << " ";
486  if (P.isValid())
487  dbgs() << DAG->TRI->getRegPressureSetName(P.getPSet()) << ":"
488  << P.getUnitInc() << " ";
489  else
490  dbgs() << " ";
491  dbgs() << "cost(" << Cost << ")\t";
492  DAG->dumpNode(*SU);
493 }
494 
495 // Very detailed queue dump, to be used with higher verbosity levels.
496 void ConvergingVLIWScheduler::readyQueueVerboseDump(
497  const RegPressureTracker &RPTracker, SchedCandidate &Candidate,
498  ReadyQueue &Q) {
499  RegPressureTracker &TempTracker = const_cast<RegPressureTracker &>(RPTracker);
500 
501  dbgs() << ">>> " << Q.getName() << "\n";
502  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
503  RegPressureDelta RPDelta;
504  TempTracker.getMaxPressureDelta((*I)->getInstr(), RPDelta,
505  DAG->getRegionCriticalPSets(),
506  DAG->getRegPressure().MaxSetPressure);
507  std::stringstream dbgstr;
508  dbgstr << "SU(" << std::setw(3) << (*I)->NodeNum << ")";
509  dbgs() << dbgstr.str();
510  SchedulingCost(Q, *I, Candidate, RPDelta, true);
511  dbgs() << "\t";
512  (*I)->getInstr()->dump();
513  }
514  dbgs() << "\n";
515 }
516 #endif
517 
518 /// isSingleUnscheduledPred - If SU2 is the only unscheduled predecessor
519 /// of SU, return true (we may have duplicates)
520 static inline bool isSingleUnscheduledPred(SUnit *SU, SUnit *SU2) {
521  if (SU->NumPredsLeft == 0)
522  return false;
523 
524  for (auto &Pred : SU->Preds) {
525  // We found an available, but not scheduled, predecessor.
526  if (!Pred.getSUnit()->isScheduled && (Pred.getSUnit() != SU2))
527  return false;
528  }
529 
530  return true;
531 }
532 
533 /// isSingleUnscheduledSucc - If SU2 is the only unscheduled successor
534 /// of SU, return true (we may have duplicates)
535 static inline bool isSingleUnscheduledSucc(SUnit *SU, SUnit *SU2) {
536  if (SU->NumSuccsLeft == 0)
537  return false;
538 
539  for (auto &Succ : SU->Succs) {
540  // We found an available, but not scheduled, successor.
541  if (!Succ.getSUnit()->isScheduled && (Succ.getSUnit() != SU2))
542  return false;
543  }
544  return true;
545 }
546 
547 /// Check if the instruction changes the register pressure of a register in the
548 /// high pressure set. The function returns a negative value if the pressure
549 /// decreases and a positive value if the pressure increases. If the instruction
550 /// doesn't use a high pressure register or doesn't change the register
551 /// pressure, then return 0.
552 int ConvergingVLIWScheduler::pressureChange(const SUnit *SU, bool isBotUp) {
553  PressureDiff &PD = DAG->getPressureDiff(SU);
554  for (auto &P : PD) {
555  if (!P.isValid())
556  continue;
557 // The pressure differences are computed bottom-up, so the comparison for
558  // an increase is positive in the bottom direction, but negative in the
559  // top-down direction.
560  if (HighPressureSets[P.getPSet()])
561  return (isBotUp ? P.getUnitInc() : -P.getUnitInc());
562  }
563  return 0;
564 }
565 
566 // Constants used to denote relative importance of
567 // heuristic components for cost computation.
568 static const unsigned PriorityOne = 200;
569 static const unsigned PriorityTwo = 50;
570 static const unsigned PriorityThree = 75;
571 static const unsigned ScaleTwo = 10;
572 
573 /// Single point to compute overall scheduling cost.
574 /// TODO: More heuristics will be used soon.
575 int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue &Q, SUnit *SU,
576  SchedCandidate &Candidate,
577  RegPressureDelta &Delta,
578  bool verbose) {
579  // Initial trivial priority.
580  int ResCount = 1;
581 
582  // Do not waste time on a node that is already scheduled.
583  if (!SU || SU->isScheduled)
584  return ResCount;
585 
586  LLVM_DEBUG(if (verbose) dbgs()
587  << ((Q.getID() == TopQID) ? "(top|" : "(bot|"));
588  // Forced priority is high.
589  if (SU->isScheduleHigh) {
590  ResCount += PriorityOne;
591  LLVM_DEBUG(dbgs() << "H|");
592  }
593 
594  unsigned IsAvailableAmt = 0;
595  // Critical path first.
596  if (Q.getID() == TopQID) {
597  if (Top.isLatencyBound(SU)) {
598  LLVM_DEBUG(if (verbose) dbgs() << "LB|");
599  ResCount += (SU->getHeight() * ScaleTwo);
600  }
601 
602  LLVM_DEBUG(if (verbose) {
603  std::stringstream dbgstr;
604  dbgstr << "h" << std::setw(3) << SU->getHeight() << "|";
605  dbgs() << dbgstr.str();
606  });
607 
608  // If resources are available for it, multiply the
609  // chance of scheduling.
610  if (Top.ResourceModel->isResourceAvailable(SU, true)) {
611  IsAvailableAmt = (PriorityTwo + PriorityThree);
612  ResCount += IsAvailableAmt;
613  LLVM_DEBUG(if (verbose) dbgs() << "A|");
614  } else
615  LLVM_DEBUG(if (verbose) dbgs() << " |");
616  } else {
617  if (Bot.isLatencyBound(SU)) {
618  LLVM_DEBUG(if (verbose) dbgs() << "LB|");
619  ResCount += (SU->getDepth() * ScaleTwo);
620  }
621 
622  LLVM_DEBUG(if (verbose) {
623  std::stringstream dbgstr;
624  dbgstr << "d" << std::setw(3) << SU->getDepth() << "|";
625  dbgs() << dbgstr.str();
626  });
627 
628  // If resources are available for it, multiply the
629  // chance of scheduling.
630  if (Bot.ResourceModel->isResourceAvailable(SU, false)) {
631  IsAvailableAmt = (PriorityTwo + PriorityThree);
632  ResCount += IsAvailableAmt;
633  LLVM_DEBUG(if (verbose) dbgs() << "A|");
634  } else
635  LLVM_DEBUG(if (verbose) dbgs() << " |");
636  }
637 
638  unsigned NumNodesBlocking = 0;
639  if (Q.getID() == TopQID) {
640  // How many SUs does it block from scheduling?
641  // Look at all of the successors of this node.
642  // Count the number of nodes that
643  // this node is the sole unscheduled node for.
644  if (Top.isLatencyBound(SU))
645  for (const SDep &SI : SU->Succs)
646  if (isSingleUnscheduledPred(SI.getSUnit(), SU))
647  ++NumNodesBlocking;
648  } else {
649  // How many unscheduled predecessors block this node?
650  if (Bot.isLatencyBound(SU))
651  for (const SDep &PI : SU->Preds)
652  if (isSingleUnscheduledSucc(PI.getSUnit(), SU))
653  ++NumNodesBlocking;
654  }
655  ResCount += (NumNodesBlocking * ScaleTwo);
656 
657  LLVM_DEBUG(if (verbose) {
658  std::stringstream dbgstr;
659  dbgstr << "blk " << std::setw(2) << NumNodesBlocking << ")|";
660  dbgs() << dbgstr.str();
661  });
662 
663  // Factor in reg pressure as a heuristic.
664  if (!IgnoreBBRegPressure) {
665  // Decrease priority by the amount that register pressure exceeds the limit.
666  ResCount -= (Delta.Excess.getUnitInc()*PriorityOne);
667  // Decrease priority if register pressure exceeds the limit.
668  ResCount -= (Delta.CriticalMax.getUnitInc()*PriorityOne);
669  // Decrease priority slightly if register pressure would increase over the
670  // current maximum.
671  ResCount -= (Delta.CurrentMax.getUnitInc()*PriorityTwo);
672  // If there are register pressure issues, then we remove the value added for
673  // the instruction being available. The rationale is that we really don't
674  // want to schedule an instruction that causes a spill.
675  if (IsAvailableAmt && pressureChange(SU, Q.getID() != TopQID) > 0 &&
676  (Delta.Excess.getUnitInc() || Delta.CriticalMax.getUnitInc() ||
677  Delta.CurrentMax.getUnitInc()))
678  ResCount -= IsAvailableAmt;
679  LLVM_DEBUG(if (verbose) {
680  dbgs() << "RP " << Delta.Excess.getUnitInc() << "/"
681  << Delta.CriticalMax.getUnitInc() << "/"
682  << Delta.CurrentMax.getUnitInc() << ")|";
683  });
684  }
685 
686  // Give a little extra priority to a .cur instruction if there is a resource
687  // available for it.
688  auto &QST = DAG->MF.getSubtarget<HexagonSubtarget>();
689  auto &QII = *QST.getInstrInfo();
690  if (SU->isInstr() && QII.mayBeCurLoad(*SU->getInstr())) {
691  if (Q.getID() == TopQID &&
692  Top.ResourceModel->isResourceAvailable(SU, true)) {
693  ResCount += PriorityTwo;
694  LLVM_DEBUG(if (verbose) dbgs() << "C|");
695  } else if (Q.getID() == BotQID &&
696  Bot.ResourceModel->isResourceAvailable(SU, false)) {
697  ResCount += PriorityTwo;
698  LLVM_DEBUG(if (verbose) dbgs() << "C|");
699  }
700  }
701 
702  // Give preference to a zero latency instruction if the dependent
703  // instruction is in the current packet.
704  if (Q.getID() == TopQID && getWeakLeft(SU, true) == 0) {
705  for (const SDep &PI : SU->Preds) {
706  if (!PI.getSUnit()->getInstr()->isPseudo() && PI.isAssignedRegDep() &&
707  PI.getLatency() == 0 &&
708  Top.ResourceModel->isInPacket(PI.getSUnit())) {
709  ResCount += PriorityThree;
710  LLVM_DEBUG(if (verbose) dbgs() << "Z|");
711  }
712  }
713  } else if (Q.getID() == BotQID && getWeakLeft(SU, false) == 0) {
714  for (const SDep &SI : SU->Succs) {
715  if (!SI.getSUnit()->getInstr()->isPseudo() && SI.isAssignedRegDep() &&
716  SI.getLatency() == 0 &&
717  Bot.ResourceModel->isInPacket(SI.getSUnit())) {
718  ResCount += PriorityThree;
719  LLVM_DEBUG(if (verbose) dbgs() << "Z|");
720  }
721  }
722  }
723 
724  // If the instruction has a non-zero latency dependence with an instruction in
725  // the current packet, then it should not be scheduled yet. The case occurs
726  // when the dependent instruction is scheduled in a new packet, so the
727  // scheduler updates the current cycle and pending instructions become
728  // available.
729  if (CheckEarlyAvail) {
730  if (Q.getID() == TopQID) {
731  for (const auto &PI : SU->Preds) {
732  if (PI.getLatency() > 0 &&
733  Top.ResourceModel->isInPacket(PI.getSUnit())) {
734  ResCount -= PriorityOne;
735  LLVM_DEBUG(if (verbose) dbgs() << "D|");
736  }
737  }
738  } else {
739  for (const auto &SI : SU->Succs) {
740  if (SI.getLatency() > 0 &&
741  Bot.ResourceModel->isInPacket(SI.getSUnit())) {
742  ResCount -= PriorityOne;
743  LLVM_DEBUG(if (verbose) dbgs() << "D|");
744  }
745  }
746  }
747  }
748 
749  LLVM_DEBUG(if (verbose) {
750  std::stringstream dbgstr;
751  dbgstr << "Total " << std::setw(4) << ResCount << ")";
752  dbgs() << dbgstr.str();
753  });
754 
755  return ResCount;
756 }
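// Rough worked example of the cost computed above (default options, no
// register pressure problems): a top-queue SU at height 3 that is latency
// bound, has resources available, and is the sole unscheduled predecessor
// of two of its successors scores 1 + 3*10 + (50 + 75) + 2*10 = 176 before
// the .cur, zero-latency, and early-availability adjustments.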
757 
758 /// Pick the best candidate from the top queue.
759 ///
760 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
761 /// DAG building. To adjust for the current scheduling location we need to
762 /// maintain the number of vreg uses remaining to be top-scheduled.
763 ConvergingVLIWScheduler::CandResult ConvergingVLIWScheduler::
764 pickNodeFromQueue(VLIWSchedBoundary &Zone, const RegPressureTracker &RPTracker,
765  SchedCandidate &Candidate) {
766  ReadyQueue &Q = Zone.Available;
767  LLVM_DEBUG(if (SchedDebugVerboseLevel > 1)
768  readyQueueVerboseDump(RPTracker, Candidate, Q);
769  else Q.dump(););
770 
771  // getMaxPressureDelta temporarily modifies the tracker.
772  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
773 
774  // BestSU remains NULL if no top candidates beat the best existing candidate.
775  CandResult FoundCandidate = NoCand;
776  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
777  RegPressureDelta RPDelta;
778  TempTracker.getMaxPressureDelta((*I)->getInstr(), RPDelta,
779  DAG->getRegionCriticalPSets(),
780  DAG->getRegPressure().MaxSetPressure);
781 
782  int CurrentCost = SchedulingCost(Q, *I, Candidate, RPDelta, false);
783 
784  // Initialize the candidate if needed.
785  if (!Candidate.SU) {
786  LLVM_DEBUG(traceCandidate("DCAND", Q, *I, CurrentCost));
787  Candidate.SU = *I;
788  Candidate.RPDelta = RPDelta;
789  Candidate.SCost = CurrentCost;
790  FoundCandidate = NodeOrder;
791  continue;
792  }
793 
794  // Choose node order for negative cost candidates. There is no good
795  // candidate in this case.
796  if (CurrentCost < 0 && Candidate.SCost < 0) {
797  if ((Q.getID() == TopQID && (*I)->NodeNum < Candidate.SU->NodeNum)
798  || (Q.getID() == BotQID && (*I)->NodeNum > Candidate.SU->NodeNum)) {
799  LLVM_DEBUG(traceCandidate("NCAND", Q, *I, CurrentCost));
800  Candidate.SU = *I;
801  Candidate.RPDelta = RPDelta;
802  Candidate.SCost = CurrentCost;
803  FoundCandidate = NodeOrder;
804  }
805  continue;
806  }
807 
808  // Best cost.
809  if (CurrentCost > Candidate.SCost) {
810  LLVM_DEBUG(traceCandidate("CCAND", Q, *I, CurrentCost));
811  Candidate.SU = *I;
812  Candidate.RPDelta = RPDelta;
813  Candidate.SCost = CurrentCost;
814  FoundCandidate = BestCost;
815  continue;
816  }
817 
818  // Choose an instruction that does not depend on an artificial edge.
819  unsigned CurrWeak = getWeakLeft(*I, (Q.getID() == TopQID));
820  unsigned CandWeak = getWeakLeft(Candidate.SU, (Q.getID() == TopQID));
821  if (CurrWeak != CandWeak) {
822  if (CurrWeak < CandWeak) {
823  LLVM_DEBUG(traceCandidate("WCAND", Q, *I, CurrentCost));
824  Candidate.SU = *I;
825  Candidate.RPDelta = RPDelta;
826  Candidate.SCost = CurrentCost;
827  FoundCandidate = Weak;
828  }
829  continue;
830  }
831 
832  if (CurrentCost == Candidate.SCost && Zone.isLatencyBound(*I)) {
833  unsigned CurrSize, CandSize;
834  if (Q.getID() == TopQID) {
835  CurrSize = (*I)->Succs.size();
836  CandSize = Candidate.SU->Succs.size();
837  } else {
838  CurrSize = (*I)->Preds.size();
839  CandSize = Candidate.SU->Preds.size();
840  }
841  if (CurrSize > CandSize) {
842  LLVM_DEBUG(traceCandidate("SPCAND", Q, *I, CurrentCost));
843  Candidate.SU = *I;
844  Candidate.RPDelta = RPDelta;
845  Candidate.SCost = CurrentCost;
846  FoundCandidate = BestCost;
847  }
848  // Keep the old candidate if it's a better candidate. That is, don't use
849  // the subsequent tie breaker.
850  if (CurrSize != CandSize)
851  continue;
852  }
853 
854  // Tie breaker.
855  // To avoid scheduling indeterminism, we need a tie breaker
856  // for the case when cost is identical for two nodes.
857  if (UseNewerCandidate && CurrentCost == Candidate.SCost) {
858  if ((Q.getID() == TopQID && (*I)->NodeNum < Candidate.SU->NodeNum)
859  || (Q.getID() == BotQID && (*I)->NodeNum > Candidate.SU->NodeNum)) {
860  LLVM_DEBUG(traceCandidate("TCAND", Q, *I, CurrentCost));
861  Candidate.SU = *I;
862  Candidate.RPDelta = RPDelta;
863  Candidate.SCost = CurrentCost;
864  FoundCandidate = NodeOrder;
865  continue;
866  }
867  }
868 
869  // Fall through to original instruction order.
870  // Only consider node order if Candidate was chosen from this Q.
871  if (FoundCandidate == NoCand)
872  continue;
873  }
874  return FoundCandidate;
875 }
876 
877 /// Pick the best candidate node from either the top or bottom queue.
878 SUnit *ConvergingVLIWScheduler::pickNodeBidrectional(bool &IsTopNode) {
879  // Schedule as far as possible in the direction of no choice. This is most
880  // efficient, but also provides the best heuristics for CriticalPSets.
881  if (SUnit *SU = Bot.pickOnlyChoice()) {
882  LLVM_DEBUG(dbgs() << "Picked only Bottom\n");
883  IsTopNode = false;
884  return SU;
885  }
886  if (SUnit *SU = Top.pickOnlyChoice()) {
887  LLVM_DEBUG(dbgs() << "Picked only Top\n");
888  IsTopNode = true;
889  return SU;
890  }
891  SchedCandidate BotCand;
892  // Prefer bottom scheduling when heuristics are silent.
893  CandResult BotResult = pickNodeFromQueue(Bot,
894  DAG->getBotRPTracker(), BotCand);
895  assert(BotResult != NoCand && "failed to find the first candidate");
896 
897  // If either Q has a single candidate that provides the least increase in
898  // Excess pressure, we can immediately schedule from that Q.
899  //
900  // RegionCriticalPSets summarizes the pressure within the scheduled region and
901  // affects picking from either Q. If scheduling in one direction must
902  // increase pressure for one of the excess PSets, then schedule in that
903  // direction first to provide more freedom in the other direction.
904  if (BotResult == SingleExcess || BotResult == SingleCritical) {
905  LLVM_DEBUG(dbgs() << "Preferred Bottom Node\n");
906  IsTopNode = false;
907  return BotCand.SU;
908  }
909  // Check if the top Q has a better candidate.
910  SchedCandidate TopCand;
911  CandResult TopResult = pickNodeFromQueue(Top,
912  DAG->getTopRPTracker(), TopCand);
913  assert(TopResult != NoCand && "failed to find the first candidate");
914 
915  if (TopResult == SingleExcess || TopResult == SingleCritical) {
916  LLVM_DEBUG(dbgs() << "Preferred Top Node\n");
917  IsTopNode = true;
918  return TopCand.SU;
919  }
920  // If either Q has a single candidate that minimizes pressure above the
921  // original region's pressure pick it.
922  if (BotResult == SingleMax) {
923  LLVM_DEBUG(dbgs() << "Preferred Bottom Node SingleMax\n");
924  IsTopNode = false;
925  return BotCand.SU;
926  }
927  if (TopResult == SingleMax) {
928  LLVM_DEBUG(dbgs() << "Preferred Top Node SingleMax\n");
929  IsTopNode = true;
930  return TopCand.SU;
931  }
932  if (TopCand.SCost > BotCand.SCost) {
933  LLVM_DEBUG(dbgs() << "Preferred Top Node Cost\n");
934  IsTopNode = true;
935  return TopCand.SU;
936  }
937  // Otherwise prefer the bottom candidate in node order.
938  LLVM_DEBUG(dbgs() << "Preferred Bottom in Node order\n");
939  IsTopNode = false;
940  return BotCand.SU;
941 }
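// Summary of the preference order implemented above: an "only choice" from
// either zone wins outright; otherwise a bottom candidate flagged as a
// single excess/critical-pressure choice, then a top candidate of that kind,
// then SingleMax from bottom, then top, then whichever side has the higher
// SchedulingCost, with the bottom candidate winning ties in node order.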
942 
943 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
944 SUnit *ConvergingVLIWScheduler::pickNode(bool &IsTopNode) {
945  if (DAG->top() == DAG->bottom()) {
946  assert(Top.Available.empty() && Top.Pending.empty() &&
947  Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
948  return nullptr;
949  }
950  SUnit *SU;
951  if (ForceTopDown) {
952  SU = Top.pickOnlyChoice();
953  if (!SU) {
954  SchedCandidate TopCand;
955  CandResult TopResult =
956  pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
957  assert(TopResult != NoCand && "failed to find the first candidate");
958  (void)TopResult;
959  SU = TopCand.SU;
960  }
961  IsTopNode = true;
962  } else if (ForceBottomUp) {
963  SU = Bot.pickOnlyChoice();
964  if (!SU) {
965  SchedCandidate BotCand;
966  CandResult BotResult =
967  pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
968  assert(BotResult != NoCand && "failed to find the first candidate");
969  (void)BotResult;
970  SU = BotCand.SU;
971  }
972  IsTopNode = false;
973  } else {
974  SU = pickNodeBidrectional(IsTopNode);
975  }
976  if (SU->isTopReady())
977  Top.removeReady(SU);
978  if (SU->isBottomReady())
979  Bot.removeReady(SU);
980 
981  LLVM_DEBUG(dbgs() << "*** " << (IsTopNode ? "Top" : "Bottom")
982  << " Scheduling instruction in cycle "
983  << (IsTopNode ? Top.CurrCycle : Bot.CurrCycle) << " ("
984  << reportPackets() << ")\n";
985  DAG->dumpNode(*SU));
986  return SU;
987 }
988 
989 /// Update the scheduler's state after scheduling a node. This is the same node
990 /// that was just returned by pickNode(). However, VLIWMachineScheduler needs
991 /// to update its state based on the current cycle before MachineSchedStrategy
992 /// does.
993 void ConvergingVLIWScheduler::schedNode(SUnit *SU, bool IsTopNode) {
994  if (IsTopNode) {
995  Top.bumpNode(SU);
996  SU->TopReadyCycle = Top.CurrCycle;
997  } else {
998  Bot.bumpNode(SU);
999  SU->BotReadyCycle = Bot.CurrCycle;
1000  }
1001 }