LLVM  8.0.1
PostRASchedulerList.cpp
Go to the documentation of this file.
1 //===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This implements a top-down list scheduler, using standard algorithms.
11 // The basic approach uses a priority queue of available nodes to schedule.
12 // One at a time, nodes are taken from the priority queue (thus in priority
13 // order), checked for legality to schedule, and emitted if legal.
14 //
15 // Nodes may not be legal to schedule either due to structural hazards (e.g.
16 // pipeline or resource constraints) or because an input to the instruction has
17 // not completed execution.
18 //
19 //===----------------------------------------------------------------------===//
20 
#include "AggressiveAntiDepBreaker.h"
#include "AntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
46 using namespace llvm;
47 
48 #define DEBUG_TYPE "post-RA-sched"
49 
50 STATISTIC(NumNoops, "Number of noops inserted");
51 STATISTIC(NumStalls, "Number of pipeline stalls");
52 STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");
53 
54 // Post-RA scheduling is enabled with
55 // TargetSubtargetInfo.enablePostRAScheduler(). This flag can be used to
56 // override the target.
57 static cl::opt<bool>
58 EnablePostRAScheduler("post-RA-scheduler",
59  cl::desc("Enable scheduling after register allocation"),
60  cl::init(false), cl::Hidden);
62 EnableAntiDepBreaking("break-anti-dependencies",
63  cl::desc("Break post-RA scheduling anti-dependencies: "
64  "\"critical\", \"all\", or \"none\""),
65  cl::init("none"), cl::Hidden);
66 
67 // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
68 static cl::opt<int>
69 DebugDiv("postra-sched-debugdiv",
70  cl::desc("Debug control MBBs that are scheduled"),
71  cl::init(0), cl::Hidden);
72 static cl::opt<int>
73 DebugMod("postra-sched-debugmod",
74  cl::desc("Debug control MBBs that are scheduled"),
75  cl::init(0), cl::Hidden);
76 
78 
79 namespace {
80  class PostRAScheduler : public MachineFunctionPass {
81  const TargetInstrInfo *TII;
82  RegisterClassInfo RegClassInfo;
83 
84  public:
85  static char ID;
86  PostRAScheduler() : MachineFunctionPass(ID) {}
87 
88  void getAnalysisUsage(AnalysisUsage &AU) const override {
89  AU.setPreservesCFG();
97  }
98 
99  MachineFunctionProperties getRequiredProperties() const override {
102  }
103 
104  bool runOnMachineFunction(MachineFunction &Fn) override;
105 
106  private:
107  bool enablePostRAScheduler(
108  const TargetSubtargetInfo &ST, CodeGenOpt::Level OptLevel,
110  TargetSubtargetInfo::RegClassVector &CriticalPathRCs) const;
111  };
112  char PostRAScheduler::ID = 0;
113 
114  class SchedulePostRATDList : public ScheduleDAGInstrs {
115  /// AvailableQueue - The priority queue to use for the available SUnits.
116  ///
117  LatencyPriorityQueue AvailableQueue;
118 
119  /// PendingQueue - This contains all of the instructions whose operands have
120  /// been issued, but their results are not ready yet (due to the latency of
121  /// the operation). Once the operands becomes available, the instruction is
122  /// added to the AvailableQueue.
123  std::vector<SUnit*> PendingQueue;
124 
125  /// HazardRec - The hazard recognizer to use.
126  ScheduleHazardRecognizer *HazardRec;
127 
128  /// AntiDepBreak - Anti-dependence breaking object, or NULL if none
129  AntiDepBreaker *AntiDepBreak;
130 
131  /// AA - AliasAnalysis for making memory reference queries.
132  AliasAnalysis *AA;
133 
134  /// The schedule. Null SUnit*'s represent noop instructions.
135  std::vector<SUnit*> Sequence;
136 
137  /// Ordered list of DAG postprocessing steps.
138  std::vector<std::unique_ptr<ScheduleDAGMutation>> Mutations;
139 
140  /// The index in BB of RegionEnd.
141  ///
142  /// This is the instruction number from the top of the current block, not
143  /// the SlotIndex. It is only used by the AntiDepBreaker.
144  unsigned EndIndex;
145 
146  public:
147  SchedulePostRATDList(
149  const RegisterClassInfo &,
152 
153  ~SchedulePostRATDList() override;
154 
155  /// startBlock - Initialize register live-range state for scheduling in
156  /// this block.
157  ///
158  void startBlock(MachineBasicBlock *BB) override;
159 
160  // Set the index of RegionEnd within the current BB.
161  void setEndIndex(unsigned EndIdx) { EndIndex = EndIdx; }
162 
163  /// Initialize the scheduler state for the next scheduling region.
164  void enterRegion(MachineBasicBlock *bb,
167  unsigned regioninstrs) override;
168 
169  /// Notify that the scheduler has finished scheduling the current region.
170  void exitRegion() override;
171 
172  /// Schedule - Schedule the instruction range using list scheduling.
173  ///
174  void schedule() override;
175 
176  void EmitSchedule();
177 
178  /// Observe - Update liveness information to account for the current
179  /// instruction, which will not be scheduled.
180  ///
181  void Observe(MachineInstr &MI, unsigned Count);
182 
183  /// finishBlock - Clean up register live-range state.
184  ///
185  void finishBlock() override;
186 
187  private:
188  /// Apply each ScheduleDAGMutation step in order.
189  void postprocessDAG();
190 
191  void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
192  void ReleaseSuccessors(SUnit *SU);
193  void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
194  void ListScheduleTopDown();
195 
196  void dumpSchedule() const;
197  void emitNoop(unsigned CurCycle);
198  };
199 }
200 
202 
203 INITIALIZE_PASS(PostRAScheduler, DEBUG_TYPE,
204  "Post RA top-down list latency scheduler", false, false)
205 
206 SchedulePostRATDList::SchedulePostRATDList(
209  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
210  SmallVectorImpl<const TargetRegisterClass *> &CriticalPathRCs)
211  : ScheduleDAGInstrs(MF, &MLI), AA(AA), EndIndex(0) {
212 
213  const InstrItineraryData *InstrItins =
214  MF.getSubtarget().getInstrItineraryData();
215  HazardRec =
216  MF.getSubtarget().getInstrInfo()->CreateTargetPostRAHazardRecognizer(
217  InstrItins, this);
218  MF.getSubtarget().getPostRAMutations(Mutations);
219 
220  assert((AntiDepMode == TargetSubtargetInfo::ANTIDEP_NONE ||
221  MRI.tracksLiveness()) &&
222  "Live-ins must be accurate for anti-dependency breaking");
223  AntiDepBreak =
224  ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_ALL) ?
225  (AntiDepBreaker *)new AggressiveAntiDepBreaker(MF, RCI, CriticalPathRCs) :
226  ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_CRITICAL) ?
227  (AntiDepBreaker *)new CriticalAntiDepBreaker(MF, RCI) : nullptr));
228 }
229 
230 SchedulePostRATDList::~SchedulePostRATDList() {
231  delete HazardRec;
232  delete AntiDepBreak;
233 }
234 
235 /// Initialize state associated with the next scheduling region.
236 void SchedulePostRATDList::enterRegion(MachineBasicBlock *bb,
239  unsigned regioninstrs) {
240  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);
241  Sequence.clear();
242 }
243 
244 /// Print the schedule before exiting the region.
245 void SchedulePostRATDList::exitRegion() {
246  LLVM_DEBUG({
247  dbgs() << "*** Final schedule ***\n";
248  dumpSchedule();
249  dbgs() << '\n';
250  });
252 }
253 
254 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
255 /// dumpSchedule - dump the scheduled Sequence.
256 LLVM_DUMP_METHOD void SchedulePostRATDList::dumpSchedule() const {
257  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
258  if (SUnit *SU = Sequence[i])
259  dumpNode(*SU);
260  else
261  dbgs() << "**** NOOP ****\n";
262  }
263 }
264 #endif
265 
266 bool PostRAScheduler::enablePostRAScheduler(
267  const TargetSubtargetInfo &ST,
268  CodeGenOpt::Level OptLevel,
270  TargetSubtargetInfo::RegClassVector &CriticalPathRCs) const {
271  Mode = ST.getAntiDepBreakMode();
272  ST.getCriticalPathRCs(CriticalPathRCs);
273 
274  // Check for explicit enable/disable of post-ra scheduling.
275  if (EnablePostRAScheduler.getPosition() > 0)
276  return EnablePostRAScheduler;
277 
278  return ST.enablePostRAScheduler() &&
279  OptLevel >= ST.getOptLevelToEnablePostRAScheduler();
280 }
281 
282 bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
283  if (skipFunction(Fn.getFunction()))
284  return false;
285 
286  TII = Fn.getSubtarget().getInstrInfo();
287  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
288  AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
289  TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();
290 
291  RegClassInfo.runOnMachineFunction(Fn);
292 
294  TargetSubtargetInfo::ANTIDEP_NONE;
296 
297  // Check that post-RA scheduling is enabled for this target.
298  // This may upgrade the AntiDepMode.
299  if (!enablePostRAScheduler(Fn.getSubtarget(), PassConfig->getOptLevel(),
300  AntiDepMode, CriticalPathRCs))
301  return false;
302 
303  // Check for antidep breaking override...
304  if (EnableAntiDepBreaking.getPosition() > 0) {
305  AntiDepMode = (EnableAntiDepBreaking == "all")
306  ? TargetSubtargetInfo::ANTIDEP_ALL
307  : ((EnableAntiDepBreaking == "critical")
308  ? TargetSubtargetInfo::ANTIDEP_CRITICAL
309  : TargetSubtargetInfo::ANTIDEP_NONE);
310  }
311 
312  LLVM_DEBUG(dbgs() << "PostRAScheduler\n");
313 
314  SchedulePostRATDList Scheduler(Fn, MLI, AA, RegClassInfo, AntiDepMode,
315  CriticalPathRCs);
316 
317  // Loop over all of the basic blocks
318  for (auto &MBB : Fn) {
319 #ifndef NDEBUG
320  // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
321  if (DebugDiv > 0) {
322  static int bbcnt = 0;
323  if (bbcnt++ % DebugDiv != DebugMod)
324  continue;
325  dbgs() << "*** DEBUG scheduling " << Fn.getName() << ":"
326  << printMBBReference(MBB) << " ***\n";
327  }
328 #endif
329 
330  // Initialize register live-range state for scheduling in this block.
331  Scheduler.startBlock(&MBB);
332 
333  // Schedule each sequence of instructions not interrupted by a label
334  // or anything else that effectively needs to shut down scheduling.
335  MachineBasicBlock::iterator Current = MBB.end();
336  unsigned Count = MBB.size(), CurrentCount = Count;
337  for (MachineBasicBlock::iterator I = Current; I != MBB.begin();) {
338  MachineInstr &MI = *std::prev(I);
339  --Count;
340  // Calls are not scheduling boundaries before register allocation, but
341  // post-ra we don't gain anything by scheduling across calls since we
342  // don't need to worry about register pressure.
343  if (MI.isCall() || TII->isSchedulingBoundary(MI, &MBB, Fn)) {
344  Scheduler.enterRegion(&MBB, I, Current, CurrentCount - Count);
345  Scheduler.setEndIndex(CurrentCount);
346  Scheduler.schedule();
347  Scheduler.exitRegion();
348  Scheduler.EmitSchedule();
349  Current = &MI;
350  CurrentCount = Count;
351  Scheduler.Observe(MI, CurrentCount);
352  }
353  I = MI;
354  if (MI.isBundle())
355  Count -= MI.getBundleSize();
356  }
357  assert(Count == 0 && "Instruction count mismatch!");
358  assert((MBB.begin() == Current || CurrentCount != 0) &&
359  "Instruction count mismatch!");
360  Scheduler.enterRegion(&MBB, MBB.begin(), Current, CurrentCount);
361  Scheduler.setEndIndex(CurrentCount);
362  Scheduler.schedule();
363  Scheduler.exitRegion();
364  Scheduler.EmitSchedule();
365 
366  // Clean up register live-range state.
367  Scheduler.finishBlock();
368 
369  // Update register kills
370  Scheduler.fixupKills(MBB);
371  }
372 
373  return true;
374 }
375 
376 /// StartBlock - Initialize register live-range state for scheduling in
377 /// this block.
378 ///
379 void SchedulePostRATDList::startBlock(MachineBasicBlock *BB) {
380  // Call the superclass.
382 
383  // Reset the hazard recognizer and anti-dep breaker.
384  HazardRec->Reset();
385  if (AntiDepBreak)
386  AntiDepBreak->StartBlock(BB);
387 }
388 
389 /// Schedule - Schedule the instruction range using list scheduling.
390 ///
391 void SchedulePostRATDList::schedule() {
392  // Build the scheduling graph.
393  buildSchedGraph(AA);
394 
395  if (AntiDepBreak) {
396  unsigned Broken =
397  AntiDepBreak->BreakAntiDependencies(SUnits, RegionBegin, RegionEnd,
398  EndIndex, DbgValues);
399 
400  if (Broken != 0) {
401  // We made changes. Update the dependency graph.
402  // Theoretically we could update the graph in place:
403  // When a live range is changed to use a different register, remove
404  // the def's anti-dependence *and* output-dependence edges due to
405  // that register, and add new anti-dependence and output-dependence
406  // edges based on the next live range of the register.
408  buildSchedGraph(AA);
409 
410  NumFixedAnti += Broken;
411  }
412  }
413 
414  postprocessDAG();
415 
416  LLVM_DEBUG(dbgs() << "********** List Scheduling **********\n");
417  LLVM_DEBUG(dump());
418 
419  AvailableQueue.initNodes(SUnits);
420  ListScheduleTopDown();
421  AvailableQueue.releaseState();
422 }
423 
424 /// Observe - Update liveness information to account for the current
425 /// instruction, which will not be scheduled.
426 ///
427 void SchedulePostRATDList::Observe(MachineInstr &MI, unsigned Count) {
428  if (AntiDepBreak)
429  AntiDepBreak->Observe(MI, Count, EndIndex);
430 }
431 
432 /// FinishBlock - Clean up register live-range state.
433 ///
434 void SchedulePostRATDList::finishBlock() {
435  if (AntiDepBreak)
436  AntiDepBreak->FinishBlock();
437 
438  // Call the superclass.
440 }
441 
442 /// Apply each ScheduleDAGMutation step in order.
443 void SchedulePostRATDList::postprocessDAG() {
444  for (auto &M : Mutations)
445  M->apply(this);
446 }
447 
448 //===----------------------------------------------------------------------===//
449 // Top-Down Scheduling
450 //===----------------------------------------------------------------------===//
451 
452 /// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
453 /// the PendingQueue if the count reaches zero.
454 void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
455  SUnit *SuccSU = SuccEdge->getSUnit();
456 
457  if (SuccEdge->isWeak()) {
458  --SuccSU->WeakPredsLeft;
459  return;
460  }
461 #ifndef NDEBUG
462  if (SuccSU->NumPredsLeft == 0) {
463  dbgs() << "*** Scheduling failed! ***\n";
464  dumpNode(*SuccSU);
465  dbgs() << " has been released too many times!\n";
466  llvm_unreachable(nullptr);
467  }
468 #endif
469  --SuccSU->NumPredsLeft;
470 
471  // Standard scheduler algorithms will recompute the depth of the successor
472  // here as such:
473  // SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
474  //
475  // However, we lazily compute node depth instead. Note that
476  // ScheduleNodeTopDown has already updated the depth of this node which causes
477  // all descendents to be marked dirty. Setting the successor depth explicitly
478  // here would cause depth to be recomputed for all its ancestors. If the
479  // successor is not yet ready (because of a transitively redundant edge) then
480  // this causes depth computation to be quadratic in the size of the DAG.
481 
482  // If all the node's predecessors are scheduled, this node is ready
483  // to be scheduled. Ignore the special ExitSU node.
484  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
485  PendingQueue.push_back(SuccSU);
486 }
487 
488 /// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
489 void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
490  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
491  I != E; ++I) {
492  ReleaseSucc(SU, &*I);
493  }
494 }
495 
496 /// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
497 /// count of its successors. If a successor pending count is zero, add it to
498 /// the Available queue.
499 void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
500  LLVM_DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
501  LLVM_DEBUG(dumpNode(*SU));
502 
503  Sequence.push_back(SU);
504  assert(CurCycle >= SU->getDepth() &&
505  "Node scheduled above its depth!");
506  SU->setDepthToAtLeast(CurCycle);
507 
508  ReleaseSuccessors(SU);
509  SU->isScheduled = true;
510  AvailableQueue.scheduledNode(SU);
511 }
512 
513 /// emitNoop - Add a noop to the current instruction sequence.
514 void SchedulePostRATDList::emitNoop(unsigned CurCycle) {
515  LLVM_DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
516  HazardRec->EmitNoop();
517  Sequence.push_back(nullptr); // NULL here means noop
518  ++NumNoops;
519 }
520 
521 /// ListScheduleTopDown - The main loop of list scheduling for top-down
522 /// schedulers.
523 void SchedulePostRATDList::ListScheduleTopDown() {
524  unsigned CurCycle = 0;
525 
526  // We're scheduling top-down but we're visiting the regions in
527  // bottom-up order, so we don't know the hazards at the start of a
528  // region. So assume no hazards (this should usually be ok as most
529  // blocks are a single region).
530  HazardRec->Reset();
531 
532  // Release any successors of the special Entry node.
533  ReleaseSuccessors(&EntrySU);
534 
535  // Add all leaves to Available queue.
536  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
537  // It is available if it has no predecessors.
538  if (!SUnits[i].NumPredsLeft && !SUnits[i].isAvailable) {
539  AvailableQueue.push(&SUnits[i]);
540  SUnits[i].isAvailable = true;
541  }
542  }
543 
544  // In any cycle where we can't schedule any instructions, we must
545  // stall or emit a noop, depending on the target.
546  bool CycleHasInsts = false;
547 
548  // While Available queue is not empty, grab the node with the highest
549  // priority. If it is not ready put it back. Schedule the node.
550  std::vector<SUnit*> NotReady;
551  Sequence.reserve(SUnits.size());
552  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
553  // Check to see if any of the pending instructions are ready to issue. If
554  // so, add them to the available queue.
555  unsigned MinDepth = ~0u;
556  for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
557  if (PendingQueue[i]->getDepth() <= CurCycle) {
558  AvailableQueue.push(PendingQueue[i]);
559  PendingQueue[i]->isAvailable = true;
560  PendingQueue[i] = PendingQueue.back();
561  PendingQueue.pop_back();
562  --i; --e;
563  } else if (PendingQueue[i]->getDepth() < MinDepth)
564  MinDepth = PendingQueue[i]->getDepth();
565  }
566 
567  LLVM_DEBUG(dbgs() << "\n*** Examining Available\n";
568  AvailableQueue.dump(this));
569 
570  SUnit *FoundSUnit = nullptr, *NotPreferredSUnit = nullptr;
571  bool HasNoopHazards = false;
572  while (!AvailableQueue.empty()) {
573  SUnit *CurSUnit = AvailableQueue.pop();
574 
576  HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
578  if (HazardRec->ShouldPreferAnother(CurSUnit)) {
579  if (!NotPreferredSUnit) {
580  // If this is the first non-preferred node for this cycle, then
581  // record it and continue searching for a preferred node. If this
582  // is not the first non-preferred node, then treat it as though
583  // there had been a hazard.
584  NotPreferredSUnit = CurSUnit;
585  continue;
586  }
587  } else {
588  FoundSUnit = CurSUnit;
589  break;
590  }
591  }
592 
593  // Remember if this is a noop hazard.
594  HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;
595 
596  NotReady.push_back(CurSUnit);
597  }
598 
599  // If we have a non-preferred node, push it back onto the available list.
600  // If we did not find a preferred node, then schedule this first
601  // non-preferred node.
602  if (NotPreferredSUnit) {
603  if (!FoundSUnit) {
604  LLVM_DEBUG(
605  dbgs() << "*** Will schedule a non-preferred instruction...\n");
606  FoundSUnit = NotPreferredSUnit;
607  } else {
608  AvailableQueue.push(NotPreferredSUnit);
609  }
610 
611  NotPreferredSUnit = nullptr;
612  }
613 
614  // Add the nodes that aren't ready back onto the available list.
615  if (!NotReady.empty()) {
616  AvailableQueue.push_all(NotReady);
617  NotReady.clear();
618  }
619 
620  // If we found a node to schedule...
621  if (FoundSUnit) {
622  // If we need to emit noops prior to this instruction, then do so.
623  unsigned NumPreNoops = HazardRec->PreEmitNoops(FoundSUnit);
624  for (unsigned i = 0; i != NumPreNoops; ++i)
625  emitNoop(CurCycle);
626 
627  // ... schedule the node...
628  ScheduleNodeTopDown(FoundSUnit, CurCycle);
629  HazardRec->EmitInstruction(FoundSUnit);
630  CycleHasInsts = true;
631  if (HazardRec->atIssueLimit()) {
632  LLVM_DEBUG(dbgs() << "*** Max instructions per cycle " << CurCycle
633  << '\n');
634  HazardRec->AdvanceCycle();
635  ++CurCycle;
636  CycleHasInsts = false;
637  }
638  } else {
639  if (CycleHasInsts) {
640  LLVM_DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
641  HazardRec->AdvanceCycle();
642  } else if (!HasNoopHazards) {
643  // Otherwise, we have a pipeline stall, but no other problem,
644  // just advance the current cycle and try again.
645  LLVM_DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
646  HazardRec->AdvanceCycle();
647  ++NumStalls;
648  } else {
649  // Otherwise, we have no instructions to issue and we have instructions
650  // that will fault if we don't do this right. This is the case for
651  // processors without pipeline interlocks and other cases.
652  emitNoop(CurCycle);
653  }
654 
655  ++CurCycle;
656  CycleHasInsts = false;
657  }
658  }
659 
660 #ifndef NDEBUG
661  unsigned ScheduledNodes = VerifyScheduledDAG(/*isBottomUp=*/false);
662  unsigned Noops = 0;
663  for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
664  if (!Sequence[i])
665  ++Noops;
666  assert(Sequence.size() - Noops == ScheduledNodes &&
667  "The number of nodes scheduled doesn't match the expected number!");
668 #endif // NDEBUG
669 }
670 
671 // EmitSchedule - Emit the machine code in scheduled order.
672 void SchedulePostRATDList::EmitSchedule() {
673  RegionBegin = RegionEnd;
674 
675  // If first instruction was a DBG_VALUE then put it back.
676  if (FirstDbgValue)
677  BB->splice(RegionEnd, BB, FirstDbgValue);
678 
679  // Then re-insert them according to the given schedule.
680  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
681  if (SUnit *SU = Sequence[i])
682  BB->splice(RegionEnd, BB, SU->getInstr());
683  else
684  // Null SUnit* is a noop.
685  TII->insertNoop(*BB, RegionEnd);
686 
687  // Update the Begin iterator, as the first instruction in the block
688  // may have been scheduled later.
689  if (i == 0)
690  RegionBegin = std::prev(RegionEnd);
691  }
692 
693  // Reinsert any remaining debug_values.
694  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
695  DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
696  std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
697  MachineInstr *DbgValue = P.first;
698  MachineBasicBlock::iterator OrigPrivMI = P.second;
699  BB->splice(++OrigPrivMI, BB, DbgValue);
700  }
701  DbgValues.clear();
702  FirstDbgValue = nullptr;
703 }
virtual void getCriticalPathRCs(RegClassVector &CriticalPathRCs) const
const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:259
virtual void finishBlock()
Cleans up after scheduling in the given block.
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
bool isCall(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:633
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
Definition: Path.cpp:250
SI Whole Quad Mode
This class represents lattice values for constants.
Definition: AllocatorList.h:24
unsigned getDepth() const
Returns the depth of this node, which is the length of the maximum path up to any node which has no p...
Definition: ScheduleDAG.h:402
static cl::opt< int > DebugDiv("postra-sched-debugdiv", cl::desc("Debug control MBBs that are scheduled"), cl::init(0), cl::Hidden)
STATISTIC(NumFunctions, "Total number of functions")
SmallVectorImpl< SDep >::iterator succ_iterator
Definition: ScheduleDAG.h:264
CodeGenOpt::Level getOptLevel() const
virtual void startBlock(MachineBasicBlock *BB)
Prepares to perform scheduling in the given block.
bool isScheduled
True once scheduled.
Definition: ScheduleDAG.h:288
void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const override
Insert a noop into the instruction stream at the specified point.
AnalysisUsage & addRequired()
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
const HexagonInstrInfo * TII
Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
This class works in conjunction with the post-RA scheduler to rename registers to break register anti...
static cl::opt< bool > EnablePostRAScheduler("post-RA-scheduler", cl::desc("Enable scheduling after register allocation"), cl::init(false), cl::Hidden)
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
#define LLVM_DUMP_METHOD
Definition: Compiler.h:74
Target-Independent Code Generator Pass Configuration Options.
virtual AntiDepBreakMode getAntiDepBreakMode() const
bool isBundle() const
Itinerary data supplied by a subtarget to be used by a target.
unsigned NumPredsLeft
of preds not scheduled.
Definition: ScheduleDAG.h:272
virtual const TargetInstrInfo * getInstrInfo() const
SUnit * getSUnit() const
Definition: ScheduleDAG.h:484
virtual void enterRegion(MachineBasicBlock *bb, MachineBasicBlock::iterator begin, MachineBasicBlock::iterator end, unsigned regioninstrs)
Initialize the DAG and common scheduler state for a new scheduling region.
TargetInstrInfo - Interface to description of machine instruction set.
void setDepthToAtLeast(unsigned NewDepth)
If NewDepth is greater than this node's depth value, sets it to be the new depth value.
Scheduling dependency.
Definition: ScheduleDAG.h:50
bool isAvailable()
Definition: Compression.cpp:48
#define P(N)
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:423
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
MachineInstr * getInstr() const
Returns the representative MachineInstr for this SUnit.
Definition: ScheduleDAG.h:377
unsigned const MachineRegisterInfo * MRI
void clearDAG()
Clears the DAG state (between regions).
Definition: ScheduleDAG.cpp:60
HazardRecognizer - This determines whether or not an instruction can be issued this cycle...
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
INITIALIZE_PASS(PostRAScheduler, DEBUG_TYPE, "Post RA top-down list latency scheduler", false, false) SchedulePostRATDList
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
enum { ANTIDEP_NONE, ANTIDEP_CRITICAL, ANTIDEP_ALL } AntiDepBreakMode
Represent the analysis usage information of a pass.
virtual void exitRegion()
Called when the scheduler has finished scheduling the current region.
unsigned getBundleSize() const
Return the number of instructions inside the MI bundle, excluding the bundle header.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned WeakPredsLeft
of weak preds not scheduled.
Definition: ScheduleDAG.h:274
char & PostRASchedulerID
createPostRAScheduler - This pass performs post register allocation scheduling.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:847
void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition: Pass.cpp:286
const Function & getFunction() const
Return the LLVM function that this machine code represents.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:133
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
Test if the given instruction should be considered a scheduling boundary.
virtual void Observe(MachineInstr &MI, unsigned Count, unsigned InsertPosIndex)=0
Update liveness information to account for the current instruction, which will not be scheduled...
MachineFunctionProperties & set(Property P)
TargetSubtargetInfo - Generic base class for all target subtargets.
#define DEBUG_TYPE
static cl::opt< int > DebugMod("postra-sched-debugmod", cl::desc("Debug control MBBs that are scheduled"), cl::init(0), cl::Hidden)
A ScheduleDAG for scheduling lists of MachineInstr.
Representation of each machine instruction.
Definition: MachineInstr.h:64
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before 'Where'.
#define I(x, y, z)
Definition: MD5.cpp:58
Sequence
A sequence of states that a pointer may go through in which an objc_retain and objc_release are actua...
Definition: PtrState.h:41
static cl::opt< std::string > EnableAntiDepBreaking("break-anti-dependencies", cl::desc("Break post-RA scheduling anti-dependencies: " "\"critical\", \"all\", or \"none\""), cl::init("none"), cl::Hidden)
virtual bool enablePostRAScheduler() const
True if the subtarget should run a scheduler after register allocation.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
aarch64 promote const
SmallVector< SDep, 4 > Succs
All sunit successors.
Definition: ScheduleDAG.h:261
bool isWeak() const
Tests if this a weak dependence.
Definition: ScheduleDAG.h:195
Machine Instruction Scheduler
IRTranslator LLVM IR MI
virtual CodeGenOpt::Level getOptLevelToEnablePostRAScheduler() const
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object...
#define LLVM_DEBUG(X)
Definition: Debug.h:123
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
Properties which a MachineFunction may have at a given point in time.
Scheduling unit. This is a node in the scheduling DAG.
Definition: ScheduleDAG.h:246
This file describes how to lower LLVM code to machine code.