LLVM  8.0.1
MachineTraceMetrics.cpp
//===- lib/CodeGen/MachineTraceMetrics.cpp --------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseSet.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "machine-trace-metrics"

char MachineTraceMetrics::ID = 0;

char &llvm::MachineTraceMetricsID = MachineTraceMetrics::ID;

INITIALIZE_PASS_BEGIN(MachineTraceMetrics, DEBUG_TYPE,
                      "Machine Trace Metrics", false, true)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_END(MachineTraceMetrics, DEBUG_TYPE,
                    "Machine Trace Metrics", false, true)

MachineTraceMetrics::MachineTraceMetrics() : MachineFunctionPass(ID) {
  std::fill(std::begin(Ensembles), std::end(Ensembles), nullptr);
}

void MachineTraceMetrics::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<MachineLoopInfo>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

bool MachineTraceMetrics::runOnMachineFunction(MachineFunction &Func) {
  MF = &Func;
  const TargetSubtargetInfo &ST = MF->getSubtarget();
  TII = ST.getInstrInfo();
  TRI = ST.getRegisterInfo();
  MRI = &MF->getRegInfo();
  Loops = &getAnalysis<MachineLoopInfo>();
  SchedModel.init(&ST);
  BlockInfo.resize(MF->getNumBlockIDs());
  ProcResourceCycles.resize(MF->getNumBlockIDs() *
                            SchedModel.getNumProcResourceKinds());
  return false;
}

void MachineTraceMetrics::releaseMemory() {
  MF = nullptr;
  BlockInfo.clear();
  for (unsigned i = 0; i != TS_NumStrategies; ++i) {
    delete Ensembles[i];
    Ensembles[i] = nullptr;
  }
}

//===----------------------------------------------------------------------===//
//                               Fixed block information
//===----------------------------------------------------------------------===//
//
// The number of instructions in a basic block and the CPU resources used by
// those instructions don't depend on any given trace strategy.

/// Compute the resource usage in basic block MBB.
const MachineTraceMetrics::FixedBlockInfo*
MachineTraceMetrics::getResources(const MachineBasicBlock *MBB) {
  assert(MBB && "No basic block");
  FixedBlockInfo *FBI = &BlockInfo[MBB->getNumber()];
  if (FBI->hasResources())
    return FBI;

  // Compute resource usage in the block.
  FBI->HasCalls = false;
  unsigned InstrCount = 0;

  // Add up per-processor resource cycles as well.
  unsigned PRKinds = SchedModel.getNumProcResourceKinds();
  SmallVector<unsigned, 32> PRCycles(PRKinds);

  for (const auto &MI : *MBB) {
    if (MI.isTransient())
      continue;
    ++InstrCount;
    if (MI.isCall())
      FBI->HasCalls = true;

    // Count processor resources used.
    if (!SchedModel.hasInstrSchedModel())
      continue;
    const MCSchedClassDesc *SC = SchedModel.resolveSchedClass(&MI);
    if (!SC->isValid())
      continue;

    for (TargetSchedModel::ProcResIter
         PI = SchedModel.getWriteProcResBegin(SC),
         PE = SchedModel.getWriteProcResEnd(SC); PI != PE; ++PI) {
      assert(PI->ProcResourceIdx < PRKinds && "Bad processor resource kind");
      PRCycles[PI->ProcResourceIdx] += PI->Cycles;
    }
  }
  FBI->InstrCount = InstrCount;

  // Scale the resource cycles so they are comparable.
  unsigned PROffset = MBB->getNumber() * PRKinds;
  for (unsigned K = 0; K != PRKinds; ++K)
    ProcResourceCycles[PROffset + K] =
      PRCycles[K] * SchedModel.getResourceFactor(K);

  return FBI;
}

ArrayRef<unsigned>
MachineTraceMetrics::getProcResourceCycles(unsigned MBBNum) const {
  assert(BlockInfo[MBBNum].hasResources() &&
         "getResources() must be called before getProcResourceCycles()");
  unsigned PRKinds = SchedModel.getNumProcResourceKinds();
  assert((MBBNum+1) * PRKinds <= ProcResourceCycles.size());
  return makeArrayRef(ProcResourceCycles.data() + MBBNum * PRKinds, PRKinds);
}
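
// Illustrative sketch (not part of the pass): why the per-resource cycle
// counts computed above are multiplied by getResourceFactor() before being
// stored. The factor normalizes resource kinds that have different unit
// counts so their scaled cycles can be compared and maxed directly. The unit
// counts, the common multiple, and the instruction mix below are invented;
// the exact factor computation lives in TargetSchedModel.
#if 0
#include <algorithm>
#include <cstdio>

int main() {
  // Hypothetical machine: 1 divider unit, 4 ALU units. Use 4 as a common
  // multiple of the unit counts; a kind's factor is (common multiple / units),
  // so fewer units make each consumed cycle "cost" more after scaling.
  unsigned DivFactor = 4 / 1; // 4
  unsigned ALUFactor = 4 / 4; // 1

  // A block consuming 2 divider cycles and 6 ALU cycles:
  unsigned ScaledDiv = 2 * DivFactor; // 8
  unsigned ScaledALU = 6 * ALUFactor; // 6
  // After scaling, the divider is correctly identified as the bottleneck even
  // though it consumed fewer raw cycles.
  std::printf("limiting scaled cycles: %u\n", std::max(ScaledDiv, ScaledALU));
  return 0;
}
#endif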

//===----------------------------------------------------------------------===//
//                         Ensemble utility functions
//===----------------------------------------------------------------------===//

MachineTraceMetrics::Ensemble::Ensemble(MachineTraceMetrics *ct)
  : MTM(*ct) {
  BlockInfo.resize(MTM.BlockInfo.size());
  unsigned PRKinds = MTM.SchedModel.getNumProcResourceKinds();
  ProcResourceDepths.resize(MTM.BlockInfo.size() * PRKinds);
  ProcResourceHeights.resize(MTM.BlockInfo.size() * PRKinds);
}

// Virtual destructor serves as an anchor.
MachineTraceMetrics::Ensemble::~Ensemble() = default;

const MachineLoop*
MachineTraceMetrics::Ensemble::getLoopFor(const MachineBasicBlock *MBB) const {
  return MTM.Loops->getLoopFor(MBB);
}

// Update resource-related information in the TraceBlockInfo for MBB.
// Only update resources related to the trace above MBB.
void MachineTraceMetrics::Ensemble::
computeDepthResources(const MachineBasicBlock *MBB) {
  TraceBlockInfo *TBI = &BlockInfo[MBB->getNumber()];
  unsigned PRKinds = MTM.SchedModel.getNumProcResourceKinds();
  unsigned PROffset = MBB->getNumber() * PRKinds;

  // Compute resources from trace above. The top block is simple.
  if (!TBI->Pred) {
    TBI->InstrDepth = 0;
    TBI->Head = MBB->getNumber();
    std::fill(ProcResourceDepths.begin() + PROffset,
              ProcResourceDepths.begin() + PROffset + PRKinds, 0);
    return;
  }

  // Compute from the block above. A post-order traversal ensures the
  // predecessor is always computed first.
  unsigned PredNum = TBI->Pred->getNumber();
  TraceBlockInfo *PredTBI = &BlockInfo[PredNum];
  assert(PredTBI->hasValidDepth() && "Trace above has not been computed yet");
  const FixedBlockInfo *PredFBI = MTM.getResources(TBI->Pred);
  TBI->InstrDepth = PredTBI->InstrDepth + PredFBI->InstrCount;
  TBI->Head = PredTBI->Head;

  // Compute per-resource depths.
  ArrayRef<unsigned> PredPRDepths = getProcResourceDepths(PredNum);
  ArrayRef<unsigned> PredPRCycles = MTM.getProcResourceCycles(PredNum);
  for (unsigned K = 0; K != PRKinds; ++K)
    ProcResourceDepths[PROffset + K] = PredPRDepths[K] + PredPRCycles[K];
}

// Update resource-related information in the TraceBlockInfo for MBB.
// Only update resources related to the trace below MBB.
void MachineTraceMetrics::Ensemble::
computeHeightResources(const MachineBasicBlock *MBB) {
  TraceBlockInfo *TBI = &BlockInfo[MBB->getNumber()];
  unsigned PRKinds = MTM.SchedModel.getNumProcResourceKinds();
  unsigned PROffset = MBB->getNumber() * PRKinds;

  // Compute resources for the current block.
  TBI->InstrHeight = MTM.getResources(MBB)->InstrCount;
  ArrayRef<unsigned> PRCycles = MTM.getProcResourceCycles(MBB->getNumber());

  // The trace tail is done.
  if (!TBI->Succ) {
    TBI->Tail = MBB->getNumber();
    llvm::copy(PRCycles, ProcResourceHeights.begin() + PROffset);
    return;
  }

  // Compute from the block below. A post-order traversal ensures the
  // predecessor is always computed first.
  unsigned SuccNum = TBI->Succ->getNumber();
  TraceBlockInfo *SuccTBI = &BlockInfo[SuccNum];
  assert(SuccTBI->hasValidHeight() && "Trace below has not been computed yet");
  TBI->InstrHeight += SuccTBI->InstrHeight;
  TBI->Tail = SuccTBI->Tail;

  // Compute per-resource heights.
  ArrayRef<unsigned> SuccPRHeights = getProcResourceHeights(SuccNum);
  for (unsigned K = 0; K != PRKinds; ++K)
    ProcResourceHeights[PROffset + K] = SuccPRHeights[K] + PRCycles[K];
}

// Check if depth resources for MBB are valid and return the TBI.
// Return NULL if the resources have been invalidated.
const MachineTraceMetrics::TraceBlockInfo*
MachineTraceMetrics::Ensemble::
getDepthResources(const MachineBasicBlock *MBB) const {
  const TraceBlockInfo *TBI = &BlockInfo[MBB->getNumber()];
  return TBI->hasValidDepth() ? TBI : nullptr;
}

// Check if height resources for MBB are valid and return the TBI.
// Return NULL if the resources have been invalidated.
const MachineTraceMetrics::TraceBlockInfo*
MachineTraceMetrics::Ensemble::
getHeightResources(const MachineBasicBlock *MBB) const {
  const TraceBlockInfo *TBI = &BlockInfo[MBB->getNumber()];
  return TBI->hasValidHeight() ? TBI : nullptr;
}

/// Get an array of processor resource depths for MBB. Indexed by processor
/// resource kind, this array contains the scaled processor resources consumed
/// by all blocks preceding MBB in its trace. It does not include instructions
/// in MBB.
///
/// Compare TraceBlockInfo::InstrDepth.
ArrayRef<unsigned>
MachineTraceMetrics::Ensemble::
getProcResourceDepths(unsigned MBBNum) const {
  unsigned PRKinds = MTM.SchedModel.getNumProcResourceKinds();
  assert((MBBNum+1) * PRKinds <= ProcResourceDepths.size());
  return makeArrayRef(ProcResourceDepths.data() + MBBNum * PRKinds, PRKinds);
}

/// Get an array of processor resource heights for MBB. Indexed by processor
/// resource kind, this array contains the scaled processor resources consumed
/// by this block and all blocks following it in its trace.
///
/// Compare TraceBlockInfo::InstrHeight.
ArrayRef<unsigned>
MachineTraceMetrics::Ensemble::
getProcResourceHeights(unsigned MBBNum) const {
  unsigned PRKinds = MTM.SchedModel.getNumProcResourceKinds();
  assert((MBBNum+1) * PRKinds <= ProcResourceHeights.size());
  return makeArrayRef(ProcResourceHeights.data() + MBBNum * PRKinds, PRKinds);
}

//===----------------------------------------------------------------------===//
//                         Trace Selection Strategies
//===----------------------------------------------------------------------===//
//
// A trace selection strategy is implemented as a sub-class of Ensemble. The
// trace through a block B is computed by two DFS traversals of the CFG
// starting from B. One upwards, and one downwards. During the upwards DFS,
// pickTracePred() is called on the post-ordered blocks. During the downwards
// DFS, pickTraceSucc() is called in a post-order.
//

// We never allow traces that leave loops, but we do allow traces to enter
// nested loops. We also never allow traces to contain back-edges.
//
// This means that a loop header can never appear above the center block of a
// trace, except as the trace head. Below the center block, loop exiting edges
// are banned.
//
// Return true if an edge from the From loop to the To loop is leaving a loop.
// Either of To and From can be null.
static bool isExitingLoop(const MachineLoop *From, const MachineLoop *To) {
  return From && !From->contains(To);
}
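
// Illustrative sketch (not part of the pass): the edge-acceptance rule stated
// in the comments above, written as a single predicate over a minimal stand-in
// for MachineLoop. The Block/Loop types and the example CFG are invented;
// real code queries MachineLoopInfo instead.
#if 0
#include <cstdio>

struct Block { int Num; };
struct Loop {
  const Block *Header;
  const Loop *Parent;                  // enclosing loop, or nullptr
  bool contains(const Loop *L) const { // walk up the nesting chain
    for (; L; L = L->Parent)
      if (L == this)
        return true;
    return false;
  }
};

// A downward trace edge MBB -> Succ is rejected if it follows a back-edge to
// the header of MBB's loop, or if it would exit that loop.
static bool isAllowedTraceSucc(const Loop *CurLoop, const Loop *SuccLoop,
                               const Block *Succ) {
  if (CurLoop && Succ == CurLoop->Header)
    return false;                      // back-edge
  if (CurLoop && !CurLoop->contains(SuccLoop))
    return false;                      // leaves CurLoop
  return true;
}

int main() {
  Block H{1}, B{2}, X{3};
  Loop L{&H, nullptr};
  std::printf("%d %d %d\n",
              isAllowedTraceSucc(&L, &L, &H),      // 0: back-edge
              isAllowedTraceSucc(&L, nullptr, &X), // 0: exits L
              isAllowedTraceSucc(&L, &L, &B));     // 1: stays inside L
  return 0;
}
#endif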

// MinInstrCountEnsemble - Pick the trace that executes the least number of
// instructions.
namespace {

class MinInstrCountEnsemble : public MachineTraceMetrics::Ensemble {
  const char *getName() const override { return "MinInstr"; }
  const MachineBasicBlock *pickTracePred(const MachineBasicBlock*) override;
  const MachineBasicBlock *pickTraceSucc(const MachineBasicBlock*) override;

public:
  MinInstrCountEnsemble(MachineTraceMetrics *mtm)
    : MachineTraceMetrics::Ensemble(mtm) {}
};

} // end anonymous namespace

// Select the preferred predecessor for MBB.
const MachineBasicBlock*
MinInstrCountEnsemble::pickTracePred(const MachineBasicBlock *MBB) {
  if (MBB->pred_empty())
    return nullptr;
  const MachineLoop *CurLoop = getLoopFor(MBB);
  // Don't leave loops, and never follow back-edges.
  if (CurLoop && MBB == CurLoop->getHeader())
    return nullptr;
  unsigned CurCount = MTM.getResources(MBB)->InstrCount;
  const MachineBasicBlock *Best = nullptr;
  unsigned BestDepth = 0;
  for (const MachineBasicBlock *Pred : MBB->predecessors()) {
    const MachineTraceMetrics::TraceBlockInfo *PredTBI =
      getDepthResources(Pred);
    // Ignore cycles that aren't natural loops.
    if (!PredTBI)
      continue;
    // Pick the predecessor that would give this block the smallest InstrDepth.
    unsigned Depth = PredTBI->InstrDepth + CurCount;
    if (!Best || Depth < BestDepth) {
      Best = Pred;
      BestDepth = Depth;
    }
  }
  return Best;
}

// Select the preferred successor for MBB.
const MachineBasicBlock*
MinInstrCountEnsemble::pickTraceSucc(const MachineBasicBlock *MBB) {
  if (MBB->pred_empty())
    return nullptr;
  const MachineLoop *CurLoop = getLoopFor(MBB);
  const MachineBasicBlock *Best = nullptr;
  unsigned BestHeight = 0;
  for (const MachineBasicBlock *Succ : MBB->successors()) {
    // Don't consider back-edges.
    if (CurLoop && Succ == CurLoop->getHeader())
      continue;
    // Don't consider successors exiting CurLoop.
    if (isExitingLoop(CurLoop, getLoopFor(Succ)))
      continue;
    const MachineTraceMetrics::TraceBlockInfo *SuccTBI =
      getHeightResources(Succ);
    // Ignore cycles that aren't natural loops.
    if (!SuccTBI)
      continue;
    // Pick the successor that would give this block the smallest InstrHeight.
    unsigned Height = SuccTBI->InstrHeight;
    if (!Best || Height < BestHeight) {
      Best = Succ;
      BestHeight = Height;
    }
  }
  return Best;
}

// Get an Ensemble sub-class for the requested trace strategy.
MachineTraceMetrics::Ensemble *
MachineTraceMetrics::getEnsemble(MachineTraceMetrics::Strategy strategy) {
  assert(strategy < TS_NumStrategies && "Invalid trace strategy enum");
  Ensemble *&E = Ensembles[strategy];
  if (E)
    return E;

  // Allocate new Ensemble on demand.
  switch (strategy) {
  case TS_MinInstrCount: return (E = new MinInstrCountEnsemble(this));
  default: llvm_unreachable("Invalid trace strategy enum");
  }
}

void MachineTraceMetrics::invalidate(const MachineBasicBlock *MBB) {
  LLVM_DEBUG(dbgs() << "Invalidate traces through " << printMBBReference(*MBB)
                    << '\n');
  BlockInfo[MBB->getNumber()].invalidate();
  for (unsigned i = 0; i != TS_NumStrategies; ++i)
    if (Ensembles[i])
      Ensembles[i]->invalidate(MBB);
}

void MachineTraceMetrics::verifyAnalysis() const {
  if (!MF)
    return;
#ifndef NDEBUG
  assert(BlockInfo.size() == MF->getNumBlockIDs() && "Outdated BlockInfo size");
  for (unsigned i = 0; i != TS_NumStrategies; ++i)
    if (Ensembles[i])
      Ensembles[i]->verify();
#endif
}

//===----------------------------------------------------------------------===//
//                               Trace building
//===----------------------------------------------------------------------===//
//
// Traces are built by two CFG traversals. To avoid recomputing too much, use a
// set abstraction that confines the search to the current loop, and doesn't
// revisit blocks.

namespace {

struct LoopBounds {
  MutableArrayRef<MachineTraceMetrics::TraceBlockInfo> Blocks;
  SmallPtrSet<const MachineBasicBlock*, 8> Visited;
  const MachineLoopInfo *Loops;
  bool Downward = false;

  LoopBounds(MutableArrayRef<MachineTraceMetrics::TraceBlockInfo> blocks,
             const MachineLoopInfo *loops) : Blocks(blocks), Loops(loops) {}
};

} // end anonymous namespace

// Specialize po_iterator_storage in order to prune the post-order traversal so
// it is limited to the current loop and doesn't traverse the loop back edges.
namespace llvm {

template<>
class po_iterator_storage<LoopBounds, true> {
  LoopBounds &LB;

public:
  po_iterator_storage(LoopBounds &lb) : LB(lb) {}

  void finishPostorder(const MachineBasicBlock*) {}

  bool insertEdge(Optional<const MachineBasicBlock *> From,
                  const MachineBasicBlock *To) {
    // Skip already visited To blocks.
    MachineTraceMetrics::TraceBlockInfo &TBI = LB.Blocks[To->getNumber()];
    if (LB.Downward ? TBI.hasValidHeight() : TBI.hasValidDepth())
      return false;
    // From is null once when To is the trace center block.
    if (From) {
      if (const MachineLoop *FromLoop = LB.Loops->getLoopFor(*From)) {
        // Don't follow backedges, don't leave FromLoop when going upwards.
        if ((LB.Downward ? To : *From) == FromLoop->getHeader())
          return false;
        // Don't leave FromLoop.
        if (isExitingLoop(FromLoop, LB.Loops->getLoopFor(To)))
          return false;
      }
    }
    // To is a new block. Mark the block as visited in case the CFG has cycles
    // that MachineLoopInfo didn't recognize as a natural loop.
    return LB.Visited.insert(To).second;
  }
};

} // end namespace llvm

/// Compute the trace through MBB.
void MachineTraceMetrics::Ensemble::computeTrace(const MachineBasicBlock *MBB) {
  LLVM_DEBUG(dbgs() << "Computing " << getName() << " trace through "
                    << printMBBReference(*MBB) << '\n');
  // Set up loop bounds for the backwards post-order traversal.
  LoopBounds Bounds(BlockInfo, MTM.Loops);

  // Run an upwards post-order search for the trace start.
  Bounds.Downward = false;
  Bounds.Visited.clear();
  for (auto I : inverse_post_order_ext(MBB, Bounds)) {
    LLVM_DEBUG(dbgs() << "  pred for " << printMBBReference(*I) << ": ");
    TraceBlockInfo &TBI = BlockInfo[I->getNumber()];
    // All the predecessors have been visited, pick the preferred one.
    TBI.Pred = pickTracePred(I);
    LLVM_DEBUG({
      if (TBI.Pred)
        dbgs() << printMBBReference(*TBI.Pred) << '\n';
      else
        dbgs() << "null\n";
    });
    // The trace leading to I is now known, compute the depth resources.
    computeDepthResources(I);
  }

  // Run a downwards post-order search for the trace end.
  Bounds.Downward = true;
  Bounds.Visited.clear();
  for (auto I : post_order_ext(MBB, Bounds)) {
    LLVM_DEBUG(dbgs() << "  succ for " << printMBBReference(*I) << ": ");
    TraceBlockInfo &TBI = BlockInfo[I->getNumber()];
    // All the successors have been visited, pick the preferred one.
    TBI.Succ = pickTraceSucc(I);
    LLVM_DEBUG({
      if (TBI.Succ)
        dbgs() << printMBBReference(*TBI.Succ) << '\n';
      else
        dbgs() << "null\n";
    });
    // The trace leaving I is now known, compute the height resources.
    computeHeightResources(I);
  }
}

/// Invalidate traces through BadMBB.
void
MachineTraceMetrics::Ensemble::invalidate(const MachineBasicBlock *BadMBB) {
  SmallVector<const MachineBasicBlock*, 16> WorkList;
  TraceBlockInfo &BadTBI = BlockInfo[BadMBB->getNumber()];

  // Invalidate height resources of blocks above MBB.
  if (BadTBI.hasValidHeight()) {
    BadTBI.invalidateHeight();
    WorkList.push_back(BadMBB);
    do {
      const MachineBasicBlock *MBB = WorkList.pop_back_val();
      LLVM_DEBUG(dbgs() << "Invalidate " << printMBBReference(*MBB) << ' '
                        << getName() << " height.\n");
      // Find any MBB predecessors that have MBB as their preferred successor.
      // They are the only ones that need to be invalidated.
      for (const MachineBasicBlock *Pred : MBB->predecessors()) {
        TraceBlockInfo &TBI = BlockInfo[Pred->getNumber()];
        if (!TBI.hasValidHeight())
          continue;
        if (TBI.Succ == MBB) {
          TBI.invalidateHeight();
          WorkList.push_back(Pred);
          continue;
        }
        // Verify that TBI.Succ is actually a *I successor.
        assert((!TBI.Succ || Pred->isSuccessor(TBI.Succ)) && "CFG changed");
      }
    } while (!WorkList.empty());
  }

  // Invalidate depth resources of blocks below MBB.
  if (BadTBI.hasValidDepth()) {
    BadTBI.invalidateDepth();
    WorkList.push_back(BadMBB);
    do {
      const MachineBasicBlock *MBB = WorkList.pop_back_val();
      LLVM_DEBUG(dbgs() << "Invalidate " << printMBBReference(*MBB) << ' '
                        << getName() << " depth.\n");
      // Find any MBB successors that have MBB as their preferred predecessor.
      // They are the only ones that need to be invalidated.
      for (const MachineBasicBlock *Succ : MBB->successors()) {
        TraceBlockInfo &TBI = BlockInfo[Succ->getNumber()];
        if (!TBI.hasValidDepth())
          continue;
        if (TBI.Pred == MBB) {
          TBI.invalidateDepth();
          WorkList.push_back(Succ);
          continue;
        }
        // Verify that TBI.Pred is actually a *I predecessor.
        assert((!TBI.Pred || Succ->isPredecessor(TBI.Pred)) && "CFG changed");
      }
    } while (!WorkList.empty());
  }

  // Clear any per-instruction data. We only have to do this for BadMBB itself
  // because the instructions in that block may change. Other blocks may be
  // invalidated, but their instructions will stay the same, so there is no
  // need to erase the Cycle entries. They will be overwritten when we
  // recompute.
  for (const auto &I : *BadMBB)
    Cycles.erase(&I);
}

void MachineTraceMetrics::Ensemble::verify() const {
#ifndef NDEBUG
  assert(BlockInfo.size() == MTM.MF->getNumBlockIDs() &&
         "Outdated BlockInfo size");
  for (unsigned Num = 0, e = BlockInfo.size(); Num != e; ++Num) {
    const TraceBlockInfo &TBI = BlockInfo[Num];
    if (TBI.hasValidDepth() && TBI.Pred) {
      const MachineBasicBlock *MBB = MTM.MF->getBlockNumbered(Num);
      assert(MBB->isPredecessor(TBI.Pred) && "CFG doesn't match trace");
      assert(BlockInfo[TBI.Pred->getNumber()].hasValidDepth() &&
             "Trace is broken, depth should have been invalidated.");
      const MachineLoop *Loop = getLoopFor(MBB);
      assert(!(Loop && MBB == Loop->getHeader()) && "Trace contains backedge");
    }
    if (TBI.hasValidHeight() && TBI.Succ) {
      const MachineBasicBlock *MBB = MTM.MF->getBlockNumbered(Num);
      assert(MBB->isSuccessor(TBI.Succ) && "CFG doesn't match trace");
      assert(BlockInfo[TBI.Succ->getNumber()].hasValidHeight() &&
             "Trace is broken, height should have been invalidated.");
      const MachineLoop *Loop = getLoopFor(MBB);
      const MachineLoop *SuccLoop = getLoopFor(TBI.Succ);
      assert(!(Loop && Loop == SuccLoop && TBI.Succ == Loop->getHeader()) &&
             "Trace contains backedge");
    }
  }
#endif
}

//===----------------------------------------------------------------------===//
//                             Data Dependencies
//===----------------------------------------------------------------------===//
//
// Compute the depth and height of each instruction based on data dependencies
// and instruction latencies. These cycle numbers assume that the CPU can issue
// an infinite number of instructions per cycle as long as their dependencies
// are ready.

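// Illustrative sketch (not part of the pass): the unbounded-issue model the
// comment above describes, on a tiny made-up dependency chain. Depth is the
// earliest issue cycle (max over operands of operand depth plus latency),
// height is the distance to the end of the trace, and the critical path is
// the largest depth + height. Instruction mix and latencies are invented.
#if 0
#include <algorithm>
#include <cstdio>
#include <vector>

struct Inst {
  std::vector<int> Ops; // indices of defining instructions in the trace
  unsigned Latency;     // cycles before a dependent instruction can issue
};

int main() {
  // I0 and I1 feed I2, which feeds I3.
  std::vector<Inst> Trace = {{{}, 1}, {{}, 3}, {{0, 1}, 2}, {{2}, 1}};

  // Depths: walk top-down, each instruction issues when its operands are done.
  std::vector<unsigned> Depth(Trace.size(), 0);
  for (unsigned i = 0; i != Trace.size(); ++i)
    for (int Op : Trace[i].Ops)
      Depth[i] = std::max(Depth[i], Depth[Op] + Trace[Op].Latency);

  // Heights: walk bottom-up, a def's height is its user's height plus latency.
  std::vector<unsigned> Height(Trace.size(), 0);
  for (unsigned i = Trace.size(); i-- != 0;)
    for (int Op : Trace[i].Ops)
      Height[Op] = std::max(Height[Op], Height[i] + Trace[Op].Latency);

  unsigned CriticalPath = 0;
  for (unsigned i = 0; i != Trace.size(); ++i)
    CriticalPath = std::max(CriticalPath, Depth[i] + Height[i]);
  std::printf("critical path: %u cycles\n", CriticalPath); // 5 for this chain
  return 0;
}
#endif
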
// A data dependency is represented as a defining MI and operand numbers on the
// defining and using MI.
namespace {

struct DataDep {
  const MachineInstr *DefMI;
  unsigned DefOp;
  unsigned UseOp;

  DataDep(const MachineInstr *DefMI, unsigned DefOp, unsigned UseOp)
    : DefMI(DefMI), DefOp(DefOp), UseOp(UseOp) {}

  /// Create a DataDep from an SSA form virtual register.
  DataDep(const MachineRegisterInfo *MRI, unsigned VirtReg, unsigned UseOp)
    : UseOp(UseOp) {
    assert(TargetRegisterInfo::isVirtualRegister(VirtReg));
    MachineRegisterInfo::def_iterator DefI = MRI->def_begin(VirtReg);
    assert(!DefI.atEnd() && "Register has no defs");
    DefMI = DefI->getParent();
    DefOp = DefI.getOperandNo();
    assert((++DefI).atEnd() && "Register has multiple defs");
  }
};

} // end anonymous namespace

// Get the input data dependencies that must be ready before UseMI can issue.
// Return true if UseMI has any physreg operands.
static bool getDataDeps(const MachineInstr &UseMI,
                        SmallVectorImpl<DataDep> &Deps,
                        const MachineRegisterInfo *MRI) {
  // Debug values should not be included in any calculations.
  if (UseMI.isDebugInstr())
    return false;

  bool HasPhysRegs = false;
  for (MachineInstr::const_mop_iterator I = UseMI.operands_begin(),
       E = UseMI.operands_end(); I != E; ++I) {
    const MachineOperand &MO = *I;
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      HasPhysRegs = true;
      continue;
    }
    // Collect virtual register reads.
    if (MO.readsReg())
      Deps.push_back(DataDep(MRI, Reg, UseMI.getOperandNo(I)));
  }
  return HasPhysRegs;
}

// Get the input data dependencies of a PHI instruction, using Pred as the
// preferred predecessor.
// This will add at most one dependency to Deps.
static void getPHIDeps(const MachineInstr &UseMI,
                       SmallVectorImpl<DataDep> &Deps,
                       const MachineBasicBlock *Pred,
                       const MachineRegisterInfo *MRI) {
  // No predecessor at the beginning of a trace. Ignore dependencies.
  if (!Pred)
    return;
  assert(UseMI.isPHI() && UseMI.getNumOperands() % 2 && "Bad PHI");
  for (unsigned i = 1; i != UseMI.getNumOperands(); i += 2) {
    if (UseMI.getOperand(i + 1).getMBB() == Pred) {
      unsigned Reg = UseMI.getOperand(i).getReg();
      Deps.push_back(DataDep(MRI, Reg, i));
      return;
    }
  }
}

// Identify physreg dependencies for UseMI, and update the live regunit
// tracking set when scanning instructions downwards.
static void updatePhysDepsDownwards(const MachineInstr *UseMI,
                                    SmallVectorImpl<DataDep> &Deps,
                                    SparseSet<LiveRegUnit> &RegUnits,
                                    const TargetRegisterInfo *TRI) {
  SmallVector<unsigned, 8> Kills;
  SmallVector<unsigned, 8> LiveDefOps;

  for (MachineInstr::const_mop_iterator MI = UseMI->operands_begin(),
       ME = UseMI->operands_end(); MI != ME; ++MI) {
    const MachineOperand &MO = *MI;
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    // Track live defs and kills for updating RegUnits.
    if (MO.isDef()) {
      if (MO.isDead())
        Kills.push_back(Reg);
      else
        LiveDefOps.push_back(UseMI->getOperandNo(MI));
    } else if (MO.isKill())
      Kills.push_back(Reg);
    // Identify dependencies.
    if (!MO.readsReg())
      continue;
    for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) {
      SparseSet<LiveRegUnit>::iterator I = RegUnits.find(*Units);
      if (I == RegUnits.end())
        continue;
      Deps.push_back(DataDep(I->MI, I->Op, UseMI->getOperandNo(MI)));
      break;
    }
  }

  // Update RegUnits to reflect live registers after UseMI.
  // First kills.
  for (unsigned Kill : Kills)
    for (MCRegUnitIterator Units(Kill, TRI); Units.isValid(); ++Units)
      RegUnits.erase(*Units);

  // Second, live defs.
  for (unsigned DefOp : LiveDefOps) {
    for (MCRegUnitIterator Units(UseMI->getOperand(DefOp).getReg(), TRI);
         Units.isValid(); ++Units) {
      LiveRegUnit &LRU = RegUnits[*Units];
      LRU.MI = UseMI;
      LRU.Op = DefOp;
    }
  }
}

/// The length of the critical path through a trace is the maximum of two path
/// lengths:
///
/// 1. The maximum height+depth over all instructions in the trace center block.
///
/// 2. The longest cross-block dependency chain. For small blocks, it is
///    possible that the critical path through the trace doesn't include any
///    instructions in the block.
///
/// This function computes the second number from the live-in list of the
/// center block.
unsigned MachineTraceMetrics::Ensemble::
computeCrossBlockCriticalPath(const TraceBlockInfo &TBI) {
  assert(TBI.HasValidInstrDepths && "Missing depth info");
  assert(TBI.HasValidInstrHeights && "Missing height info");
  unsigned MaxLen = 0;
  for (const LiveInReg &LIR : TBI.LiveIns) {
    if (!TargetRegisterInfo::isVirtualRegister(LIR.Reg))
      continue;
    const MachineInstr *DefMI = MTM.MRI->getVRegDef(LIR.Reg);
    // Ignore dependencies outside the current trace.
    const TraceBlockInfo &DefTBI = BlockInfo[DefMI->getParent()->getNumber()];
    if (!DefTBI.isUsefulDominator(TBI))
      continue;
    unsigned Len = LIR.Height + Cycles[DefMI].Depth;
    MaxLen = std::max(MaxLen, Len);
  }
  return MaxLen;
}
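
// Worked example (hypothetical numbers, not from the source): suppose the
// center block has a live-in virtual register whose LiveInReg::Height is 4,
// and the instruction defining it higher up in the trace has
// Cycles[DefMI].Depth == 3. That single dependency forces a cross-block path
// of 3 + 4 = 7 cycles, even if no instruction inside the center block has
// depth + height that large, which is why the two numbers are maxed.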

void MachineTraceMetrics::Ensemble::
updateDepth(MachineTraceMetrics::TraceBlockInfo &TBI, const MachineInstr &UseMI,
            SparseSet<LiveRegUnit> &RegUnits) {
  SmallVector<DataDep, 8> Deps;
  // Collect all data dependencies.
  if (UseMI.isPHI())
    getPHIDeps(UseMI, Deps, TBI.Pred, MTM.MRI);
  else if (getDataDeps(UseMI, Deps, MTM.MRI))
    updatePhysDepsDownwards(&UseMI, Deps, RegUnits, MTM.TRI);

  // Filter and process dependencies, computing the earliest issue cycle.
  unsigned Cycle = 0;
  for (const DataDep &Dep : Deps) {
    const TraceBlockInfo &DepTBI =
      BlockInfo[Dep.DefMI->getParent()->getNumber()];
    // Ignore dependencies from outside the current trace.
    if (!DepTBI.isUsefulDominator(TBI))
      continue;
    assert(DepTBI.HasValidInstrDepths && "Inconsistent dependency");
    unsigned DepCycle = Cycles.lookup(Dep.DefMI).Depth;
    // Add latency if DefMI is a real instruction. Transients get latency 0.
    if (!Dep.DefMI->isTransient())
      DepCycle += MTM.SchedModel
        .computeOperandLatency(Dep.DefMI, Dep.DefOp, &UseMI, Dep.UseOp);
    Cycle = std::max(Cycle, DepCycle);
  }
  // Remember the instruction depth.
  InstrCycles &MICycles = Cycles[&UseMI];
  MICycles.Depth = Cycle;

  if (TBI.HasValidInstrHeights) {
    // Update critical path length.
    TBI.CriticalPath = std::max(TBI.CriticalPath, Cycle + MICycles.Height);
    LLVM_DEBUG(dbgs() << TBI.CriticalPath << '\t' << Cycle << '\t' << UseMI);
  } else {
    LLVM_DEBUG(dbgs() << Cycle << '\t' << UseMI);
  }
}

void MachineTraceMetrics::Ensemble::
updateDepth(const MachineBasicBlock *MBB, const MachineInstr &UseMI,
            SparseSet<LiveRegUnit> &RegUnits) {
  updateDepth(BlockInfo[MBB->getNumber()], UseMI, RegUnits);
}

void MachineTraceMetrics::Ensemble::
updateDepths(MachineBasicBlock::iterator Start,
             MachineBasicBlock::iterator End,
             SparseSet<LiveRegUnit> &RegUnits) {
  for (; Start != End; Start++)
    updateDepth(Start->getParent(), *Start, RegUnits);
}

/// Compute instruction depths for all instructions above or in MBB in its
/// trace. This assumes that the trace through MBB has already been computed.
void MachineTraceMetrics::Ensemble::
computeInstrDepths(const MachineBasicBlock *MBB) {
  // The top of the trace may already be computed, and HasValidInstrDepths
  // implies Head->HasValidInstrDepths, so we only need to start from the first
  // block in the trace that needs to be recomputed.
  SmallVector<const MachineBasicBlock*, 8> Stack;
  do {
    TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];
    assert(TBI.hasValidDepth() && "Incomplete trace");
    if (TBI.HasValidInstrDepths)
      break;
    Stack.push_back(MBB);
    MBB = TBI.Pred;
  } while (MBB);

  // FIXME: If MBB is non-null at this point, it is the last pre-computed block
  // in the trace. We should track any live-out physregs that were defined in
  // the trace. This is quite rare in SSA form, typically created by CSE
  // hoisting a compare.
  SparseSet<LiveRegUnit> RegUnits;
  RegUnits.setUniverse(MTM.TRI->getNumRegUnits());

  // Go through trace blocks in top-down order, stopping after the center block.
  while (!Stack.empty()) {
    MBB = Stack.pop_back_val();
    LLVM_DEBUG(dbgs() << "\nDepths for " << printMBBReference(*MBB) << ":\n");
    TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];
    TBI.HasValidInstrDepths = true;
    TBI.CriticalPath = 0;

    // Print out resource depths here as well.
    LLVM_DEBUG({
      dbgs() << format("%7u Instructions\n", TBI.InstrDepth);
      ArrayRef<unsigned> PRDepths = getProcResourceDepths(MBB->getNumber());
      for (unsigned K = 0; K != PRDepths.size(); ++K)
        if (PRDepths[K]) {
          unsigned Factor = MTM.SchedModel.getResourceFactor(K);
          dbgs() << format("%6uc @ ", MTM.getCycles(PRDepths[K]))
                 << MTM.SchedModel.getProcResource(K)->Name << " ("
                 << PRDepths[K]/Factor << " ops x" << Factor << ")\n";
        }
    });

    // Also compute the critical path length through MBB when possible.
    if (TBI.HasValidInstrHeights)
      TBI.CriticalPath = computeCrossBlockCriticalPath(TBI);

    for (const auto &UseMI : *MBB) {
      updateDepth(TBI, UseMI, RegUnits);
    }
  }
}

// Identify physreg dependencies for MI when scanning instructions upwards.
// Return the issue height of MI after considering any live regunits.
// Height is the issue height computed from virtual register dependencies alone.
static unsigned updatePhysDepsUpwards(const MachineInstr &MI, unsigned Height,
                                      SparseSet<LiveRegUnit> &RegUnits,
                                      const TargetSchedModel &SchedModel,
                                      const TargetInstrInfo *TII,
                                      const TargetRegisterInfo *TRI) {
  SmallVector<unsigned, 8> ReadOps;

  for (MachineInstr::const_mop_iterator MOI = MI.operands_begin(),
       MOE = MI.operands_end();
       MOI != MOE; ++MOI) {
    const MachineOperand &MO = *MOI;
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (MO.readsReg())
      ReadOps.push_back(MI.getOperandNo(MOI));
    if (!MO.isDef())
      continue;
    // This is a def of Reg. Remove corresponding entries from RegUnits, and
    // update MI Height to consider the physreg dependencies.
    for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) {
      SparseSet<LiveRegUnit>::iterator I = RegUnits.find(*Units);
      if (I == RegUnits.end())
        continue;
      unsigned DepHeight = I->Cycle;
      if (!MI.isTransient()) {
        // We may not know the UseMI of this dependency, if it came from the
        // live-in list. SchedModel can handle a NULL UseMI.
        DepHeight += SchedModel.computeOperandLatency(&MI, MI.getOperandNo(MOI),
                                                      I->MI, I->Op);
      }
      Height = std::max(Height, DepHeight);
      // This regunit is dead above MI.
      RegUnits.erase(I);
    }
  }

  // Now we know the height of MI. Update any regunits read.
  for (unsigned i = 0, e = ReadOps.size(); i != e; ++i) {
    unsigned Reg = MI.getOperand(ReadOps[i]).getReg();
    for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) {
      LiveRegUnit &LRU = RegUnits[*Units];
      // Set the height to the highest reader of the unit.
      if (LRU.Cycle <= Height && LRU.MI != &MI) {
        LRU.Cycle = Height;
        LRU.MI = &MI;
        LRU.Op = ReadOps[i];
      }
    }
  }

  return Height;
}

using MIHeightMap = DenseMap<const MachineInstr *, unsigned>;

// Push the height of DefMI upwards if required to match UseMI.
// Return true if this is the first time DefMI was seen.
static bool pushDepHeight(const DataDep &Dep, const MachineInstr &UseMI,
                          unsigned UseHeight, MIHeightMap &Heights,
                          const TargetSchedModel &SchedModel,
                          const TargetInstrInfo *TII) {
  // Adjust height by Dep.DefMI latency.
  if (!Dep.DefMI->isTransient())
    UseHeight += SchedModel.computeOperandLatency(Dep.DefMI, Dep.DefOp, &UseMI,
                                                  Dep.UseOp);

  // Update Heights[DefMI] to be the maximum height seen.
  MIHeightMap::iterator I;
  bool New;
  std::tie(I, New) = Heights.insert(std::make_pair(Dep.DefMI, UseHeight));
  if (New)
    return true;

  // DefMI has been pushed before. Give it the max height.
  if (I->second < UseHeight)
    I->second = UseHeight;
  return false;
}

/// Assuming that the virtual register defined by DefMI:DefOp was used by
/// Trace.back(), add it to the live-in lists of all the blocks in Trace. Stop
/// when reaching the block that contains DefMI.
void MachineTraceMetrics::Ensemble::
addLiveIns(const MachineInstr *DefMI, unsigned DefOp,
           ArrayRef<const MachineBasicBlock*> Trace) {
  assert(!Trace.empty() && "Trace should contain at least one block");
  unsigned Reg = DefMI->getOperand(DefOp).getReg();
  assert(TargetRegisterInfo::isVirtualRegister(Reg));
  const MachineBasicBlock *DefMBB = DefMI->getParent();

  // Reg is live-in to all blocks in Trace that follow DefMBB.
  for (unsigned i = Trace.size(); i; --i) {
    const MachineBasicBlock *MBB = Trace[i-1];
    if (MBB == DefMBB)
      return;
    TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];
    // Just add the register. The height will be updated later.
    TBI.LiveIns.push_back(Reg);
  }
}

/// Compute instruction heights in the trace through MBB. This updates MBB and
/// the blocks below it in the trace. It is assumed that the trace has already
/// been computed.
void MachineTraceMetrics::Ensemble::
computeInstrHeights(const MachineBasicBlock *MBB) {
  // The bottom of the trace may already be computed.
  // Find the blocks that need updating.
  SmallVector<const MachineBasicBlock*, 8> Stack;
  do {
    TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];
    assert(TBI.hasValidHeight() && "Incomplete trace");
    if (TBI.HasValidInstrHeights)
      break;
    Stack.push_back(MBB);
    TBI.LiveIns.clear();
    MBB = TBI.Succ;
  } while (MBB);

  // As we move upwards in the trace, keep track of instructions that are
  // required by deeper trace instructions. Map MI -> height required so far.
  MIHeightMap Heights;

  // For physregs, the def isn't known when we see the use.
  // Instead, keep track of the highest use of each regunit.
  SparseSet<LiveRegUnit> RegUnits;
  RegUnits.setUniverse(MTM.TRI->getNumRegUnits());

  // If the bottom of the trace was already precomputed, initialize heights
  // from its live-in list.
  // MBB is the highest precomputed block in the trace.
  if (MBB) {
    TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];
    for (LiveInReg &LI : TBI.LiveIns) {
      if (TargetRegisterInfo::isVirtualRegister(LI.Reg)) {
        // For virtual registers, the def latency is included.
        unsigned &Height = Heights[MTM.MRI->getVRegDef(LI.Reg)];
        if (Height < LI.Height)
          Height = LI.Height;
      } else {
        // For register units, the def latency is not included because we don't
        // know the def yet.
        RegUnits[LI.Reg].Cycle = LI.Height;
      }
    }
  }

  // Go through the trace blocks in bottom-up order.
  SmallVector<DataDep, 8> Deps;
  for (;!Stack.empty(); Stack.pop_back()) {
    MBB = Stack.back();
    LLVM_DEBUG(dbgs() << "Heights for " << printMBBReference(*MBB) << ":\n");
    TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];
    TBI.HasValidInstrHeights = true;
    TBI.CriticalPath = 0;

    LLVM_DEBUG({
      dbgs() << format("%7u Instructions\n", TBI.InstrHeight);
      ArrayRef<unsigned> PRHeights = getProcResourceHeights(MBB->getNumber());
      for (unsigned K = 0; K != PRHeights.size(); ++K)
        if (PRHeights[K]) {
          unsigned Factor = MTM.SchedModel.getResourceFactor(K);
          dbgs() << format("%6uc @ ", MTM.getCycles(PRHeights[K]))
                 << MTM.SchedModel.getProcResource(K)->Name << " ("
                 << PRHeights[K]/Factor << " ops x" << Factor << ")\n";
        }
    });

    // Get dependencies from PHIs in the trace successor.
    const MachineBasicBlock *Succ = TBI.Succ;
    // If MBB is the last block in the trace, and it has a back-edge to the
    // loop header, get loop-carried dependencies from PHIs in the header. For
    // that purpose, pretend that all the loop header PHIs have height 0.
    if (!Succ)
      if (const MachineLoop *Loop = getLoopFor(MBB))
        if (MBB->isSuccessor(Loop->getHeader()))
          Succ = Loop->getHeader();

    if (Succ) {
      for (const auto &PHI : *Succ) {
        if (!PHI.isPHI())
          break;
        Deps.clear();
        getPHIDeps(PHI, Deps, MBB, MTM.MRI);
        if (!Deps.empty()) {
          // Loop header PHI heights are all 0.
          unsigned Height = TBI.Succ ? Cycles.lookup(&PHI).Height : 0;
          LLVM_DEBUG(dbgs() << "pred\t" << Height << '\t' << PHI);
          if (pushDepHeight(Deps.front(), PHI, Height, Heights, MTM.SchedModel,
                            MTM.TII))
            addLiveIns(Deps.front().DefMI, Deps.front().DefOp, Stack);
        }
      }
    }

    // Go through the block backwards.
    for (MachineBasicBlock::const_iterator BI = MBB->end(), BB = MBB->begin();
         BI != BB;) {
      const MachineInstr &MI = *--BI;

      // Find the MI height as determined by virtual register uses in the
      // trace below.
      unsigned Cycle = 0;
      MIHeightMap::iterator HeightI = Heights.find(&MI);
      if (HeightI != Heights.end()) {
        Cycle = HeightI->second;
        // We won't be seeing any more MI uses.
        Heights.erase(HeightI);
      }

      // Don't process PHI deps. They depend on the specific predecessor, and
      // we'll get them when visiting the predecessor.
      Deps.clear();
      bool HasPhysRegs = !MI.isPHI() && getDataDeps(MI, Deps, MTM.MRI);

      // There may also be regunit dependencies to include in the height.
      if (HasPhysRegs)
        Cycle = updatePhysDepsUpwards(MI, Cycle, RegUnits, MTM.SchedModel,
                                      MTM.TII, MTM.TRI);

      // Update the required height of any virtual registers read by MI.
      for (const DataDep &Dep : Deps)
        if (pushDepHeight(Dep, MI, Cycle, Heights, MTM.SchedModel, MTM.TII))
          addLiveIns(Dep.DefMI, Dep.DefOp, Stack);

      InstrCycles &MICycles = Cycles[&MI];
      MICycles.Height = Cycle;
      if (!TBI.HasValidInstrDepths) {
        LLVM_DEBUG(dbgs() << Cycle << '\t' << MI);
        continue;
      }
      // Update critical path length.
      TBI.CriticalPath = std::max(TBI.CriticalPath, Cycle + MICycles.Depth);
      LLVM_DEBUG(dbgs() << TBI.CriticalPath << '\t' << Cycle << '\t' << MI);
    }

    // Update virtual live-in heights. They were added by addLiveIns() with a 0
    // height because the final height isn't known until now.
    LLVM_DEBUG(dbgs() << printMBBReference(*MBB) << " Live-ins:");
    for (LiveInReg &LIR : TBI.LiveIns) {
      const MachineInstr *DefMI = MTM.MRI->getVRegDef(LIR.Reg);
      LIR.Height = Heights.lookup(DefMI);
      LLVM_DEBUG(dbgs() << ' ' << printReg(LIR.Reg) << '@' << LIR.Height);
    }

    // Transfer the live regunits to the live-in list.
    for (SparseSet<LiveRegUnit>::const_iterator
         RI = RegUnits.begin(), RE = RegUnits.end(); RI != RE; ++RI) {
      TBI.LiveIns.push_back(LiveInReg(RI->RegUnit, RI->Cycle));
      LLVM_DEBUG(dbgs() << ' ' << printRegUnit(RI->RegUnit, MTM.TRI) << '@'
                        << RI->Cycle);
    }
    LLVM_DEBUG(dbgs() << '\n');

    if (!TBI.HasValidInstrDepths)
      continue;
    // Add live-ins to the critical path length.
    TBI.CriticalPath = std::max(TBI.CriticalPath,
                                computeCrossBlockCriticalPath(TBI));
    LLVM_DEBUG(dbgs() << "Critical path: " << TBI.CriticalPath << '\n');
  }
}

MachineTraceMetrics::Trace
MachineTraceMetrics::Ensemble::getTrace(const MachineBasicBlock *MBB) {
  TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];

  if (!TBI.hasValidDepth() || !TBI.hasValidHeight())
    computeTrace(MBB);
  if (!TBI.HasValidInstrDepths)
    computeInstrDepths(MBB);
  if (!TBI.HasValidInstrHeights)
    computeInstrHeights(MBB);

  return Trace(*this, TBI);
}
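
// Illustrative sketch (not part of the pass): how a client pass typically
// consumes this analysis. The helper below is hypothetical; the
// MachineTraceMetrics calls are the ones defined in this file and its header.
#if 0
static bool isOnCriticalPath(MachineTraceMetrics &Traces,
                             const MachineInstr &MI) {
  // Lazily build the min-instruction-count ensemble and the trace through
  // MI's block, then ask how much slack MI has relative to the critical path.
  MachineTraceMetrics::Ensemble *MinInstr =
      Traces.getEnsemble(MachineTraceMetrics::TS_MinInstrCount);
  MachineTraceMetrics::Trace Trace = MinInstr->getTrace(MI.getParent());
  return Trace.getInstrSlack(MI) == 0;
}
// After rewriting instructions in a block, a client must call
// Traces.invalidate(MBB) so cached depths and heights are recomputed on the
// next query.
#endif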

unsigned
MachineTraceMetrics::Trace::getInstrSlack(const MachineInstr &MI) const {
  assert(getBlockNum() == unsigned(MI.getParent()->getNumber()) &&
         "MI must be in the trace center block");
  InstrCycles Cyc = getInstrCycles(MI);
  return getCriticalPath() - (Cyc.Depth + Cyc.Height);
}

unsigned
MachineTraceMetrics::Trace::getPHIDepth(const MachineInstr &PHI) const {
  const MachineBasicBlock *MBB = TE.MTM.MF->getBlockNumbered(getBlockNum());
  SmallVector<DataDep, 1> Deps;
  getPHIDeps(PHI, Deps, MBB, TE.MTM.MRI);
  assert(Deps.size() == 1 && "PHI doesn't have MBB as a predecessor");
  DataDep &Dep = Deps.front();
  unsigned DepCycle = getInstrCycles(*Dep.DefMI).Depth;
  // Add latency if DefMI is a real instruction. Transients get latency 0.
  if (!Dep.DefMI->isTransient())
    DepCycle += TE.MTM.SchedModel.computeOperandLatency(Dep.DefMI, Dep.DefOp,
                                                        &PHI, Dep.UseOp);
  return DepCycle;
}

/// When bottom is set include instructions in current block in estimate.
unsigned MachineTraceMetrics::Trace::getResourceDepth(bool Bottom) const {
  // Find the limiting processor resource.
  // Numbers have been pre-scaled to be comparable.
  unsigned PRMax = 0;
  ArrayRef<unsigned> PRDepths = TE.getProcResourceDepths(getBlockNum());
  if (Bottom) {
    ArrayRef<unsigned> PRCycles = TE.MTM.getProcResourceCycles(getBlockNum());
    for (unsigned K = 0; K != PRDepths.size(); ++K)
      PRMax = std::max(PRMax, PRDepths[K] + PRCycles[K]);
  } else {
    for (unsigned K = 0; K != PRDepths.size(); ++K)
      PRMax = std::max(PRMax, PRDepths[K]);
  }
  // Convert to cycle count.
  PRMax = TE.MTM.getCycles(PRMax);

  /// All instructions before current block
  unsigned Instrs = TBI.InstrDepth;
  // plus instructions in current block
  if (Bottom)
    Instrs += TE.MTM.BlockInfo[getBlockNum()].InstrCount;
  if (unsigned IW = TE.MTM.SchedModel.getIssueWidth())
    Instrs /= IW;
  // Assume issue width 1 without a schedule model.
  return std::max(Instrs, PRMax);
}

unsigned MachineTraceMetrics::Trace::getResourceLength(
    ArrayRef<const MachineBasicBlock *> Extrablocks,
    ArrayRef<const MCSchedClassDesc *> ExtraInstrs,
    ArrayRef<const MCSchedClassDesc *> RemoveInstrs) const {
  // Add up resources above and below the center block.
  ArrayRef<unsigned> PRDepths = TE.getProcResourceDepths(getBlockNum());
  ArrayRef<unsigned> PRHeights = TE.getProcResourceHeights(getBlockNum());
  unsigned PRMax = 0;

  // Capture computing cycles from extra instructions
  auto extraCycles = [this](ArrayRef<const MCSchedClassDesc *> Instrs,
                            unsigned ResourceIdx)
                         ->unsigned {
    unsigned Cycles = 0;
    for (const MCSchedClassDesc *SC : Instrs) {
      if (!SC->isValid())
        continue;
      for (TargetSchedModel::ProcResIter
               PI = TE.MTM.SchedModel.getWriteProcResBegin(SC),
               PE = TE.MTM.SchedModel.getWriteProcResEnd(SC);
           PI != PE; ++PI) {
        if (PI->ProcResourceIdx != ResourceIdx)
          continue;
        Cycles +=
            (PI->Cycles * TE.MTM.SchedModel.getResourceFactor(ResourceIdx));
      }
    }
    return Cycles;
  };

  for (unsigned K = 0; K != PRDepths.size(); ++K) {
    unsigned PRCycles = PRDepths[K] + PRHeights[K];
    for (const MachineBasicBlock *MBB : Extrablocks)
      PRCycles += TE.MTM.getProcResourceCycles(MBB->getNumber())[K];
    PRCycles += extraCycles(ExtraInstrs, K);
    PRCycles -= extraCycles(RemoveInstrs, K);
    PRMax = std::max(PRMax, PRCycles);
  }
  // Convert to cycle count.
  PRMax = TE.MTM.getCycles(PRMax);

  // Instrs: #instructions in current trace outside current block.
  unsigned Instrs = TBI.InstrDepth + TBI.InstrHeight;
  // Add instruction count from the extra blocks.
  for (const MachineBasicBlock *MBB : Extrablocks)
    Instrs += TE.MTM.getResources(MBB)->InstrCount;
  Instrs += ExtraInstrs.size();
  Instrs -= RemoveInstrs.size();
  if (unsigned IW = TE.MTM.SchedModel.getIssueWidth())
    Instrs /= IW;
  // Assume issue width 1 without a schedule model.
  return std::max(Instrs, PRMax);
}
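
// Worked example (hypothetical numbers, not from the source): with an issue
// width of 2, a trace whose InstrDepth + InstrHeight is 18 instructions needs
// at least 18 / 2 = 9 cycles on the issue side. If the most contended
// resource kind's scaled cycles convert to 12 cycles, getResourceLength()
// returns max(9, 12) = 12. Adding an extra block of 6 instructions that does
// not touch the limiting resource only raises the issue bound to 24 / 2 = 12.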

bool MachineTraceMetrics::Trace::isDepInTrace(const MachineInstr &DefMI,
                                              const MachineInstr &UseMI) const {
  if (DefMI.getParent() == UseMI.getParent())
    return true;

  const TraceBlockInfo &DepTBI = TE.BlockInfo[DefMI.getParent()->getNumber()];
  const TraceBlockInfo &TBI = TE.BlockInfo[UseMI.getParent()->getNumber()];

  return DepTBI.isUsefulDominator(TBI);
}

void MachineTraceMetrics::Ensemble::print(raw_ostream &OS) const {
  OS << getName() << " ensemble:\n";
  for (unsigned i = 0, e = BlockInfo.size(); i != e; ++i) {
    OS << "  %bb." << i << '\t';
    BlockInfo[i].print(OS);
    OS << '\n';
  }
}

void MachineTraceMetrics::TraceBlockInfo::print(raw_ostream &OS) const {
  if (hasValidDepth()) {
    OS << "depth=" << InstrDepth;
    if (Pred)
      OS << " pred=" << printMBBReference(*Pred);
    else
      OS << " pred=null";
    OS << " head=%bb." << Head;
    if (HasValidInstrDepths)
      OS << " +instrs";
  } else
    OS << "depth invalid";
  OS << ", ";
  if (hasValidHeight()) {
    OS << "height=" << InstrHeight;
    if (Succ)
      OS << " succ=" << printMBBReference(*Succ);
    else
      OS << " succ=null";
    OS << " tail=%bb." << Tail;
    if (HasValidInstrHeights)
      OS << " +instrs";
  } else
    OS << "height invalid";
  if (HasValidInstrDepths && HasValidInstrHeights)
    OS << ", crit=" << CriticalPath;
}

void MachineTraceMetrics::Trace::print(raw_ostream &OS) const {
  unsigned MBBNum = &TBI - &TE.BlockInfo[0];

  OS << TE.getName() << " trace %bb." << TBI.Head << " --> %bb." << MBBNum
     << " --> %bb." << TBI.Tail << ':';
  if (TBI.hasValidHeight() && TBI.hasValidDepth())
    OS << ' ' << getInstrCount() << " instrs.";
  if (TBI.HasValidInstrDepths && TBI.HasValidInstrHeights)
    OS << ' ' << TBI.CriticalPath << " cycles.";

  const MachineTraceMetrics::TraceBlockInfo *Block = &TBI;
  OS << "\n%bb." << MBBNum;
  while (Block->hasValidDepth() && Block->Pred) {
    unsigned Num = Block->Pred->getNumber();
    OS << " <- " << printMBBReference(*Block->Pred);
    Block = &TE.BlockInfo[Num];
  }

  Block = &TBI;
  OS << "\n    ";
  while (Block->hasValidHeight() && Block->Succ) {
    unsigned Num = Block->Succ->getNumber();
    OS << " -> " << printMBBReference(*Block->Succ);
    Block = &TE.BlockInfo[Num];
  }
  OS << '\n';
}
bool HasValidInstrDepths
Instruction depths have been computed. This implies hasValidDepth().
const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:259
mop_iterator operands_end()
Definition: MachineInstr.h:454
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
Definition: Path.cpp:250
ProcResIter getWriteProcResBegin(const MCSchedClassDesc *SC) const
MachineBasicBlock * getMBB() const
This class represents lattice values for constants.
Definition: AllocatorList.h:24
const MachineBasicBlock * Pred
Trace predecessor, or NULL for the first block in the trace.
void updateDepths(MachineBasicBlock::iterator Start, MachineBasicBlock::iterator End, SparseSet< LiveRegUnit > &RegUnits)
Updates the depth of the instructions from Start to End.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
unsigned getNumBlockIDs() const
getNumBlockIDs - Return the number of MBB ID&#39;s allocated.
unsigned Depth
Earliest issue cycle as determined by data dependencies and instruction latencies from the beginning ...
unsigned getReg() const
getReg - Returns the register number.
unsigned getOperandNo(const_mop_iterator I) const
Returns the number of the operand iterator I points to.
Definition: MachineInstr.h:509
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
bool HasCalls
True when the block contains calls.
unsigned Reg
format_object< Ts... > format(const char *Fmt, const Ts &... Vals)
These are helper functions used to produce formatted output.
Definition: Format.h:124
const MachineLoop * getLoopFor(const MachineBasicBlock *) const
unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx, const MachineInstr *UseMI, unsigned UseOperIdx) const
Compute operand latency based on the available machine model.
void invalidateHeight()
Invalidate height resources when a block below this one has changed.
ProcResIter getWriteProcResEnd(const MCSchedClassDesc *SC) const
bool isTransient() const
Return true if this is a transient instruction that is either very likely to be eliminated during reg...
A trace ensemble is a collection of traces selected using the same strategy, for example &#39;minimum res...
block Block Frequency true
static unsigned InstrCount
bool isPHI() const
unsigned getNumRegUnits() const
Return the number of (native) register units in the target.
unsigned InstrCount
The number of non-trivial instructions in the block.
void updateDepth(TraceBlockInfo &TBI, const MachineInstr &, SparseSet< LiveRegUnit > &RegUnits)
Updates the depth of an machine instruction, given RegUnits.
iterator_range< succ_iterator > successors()
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:221
AnalysisUsage & addRequired()
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition: PassSupport.h:51
Hexagon Hardware Loops
bool hasResources() const
Returns true when resource information for this block has been computed.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
Provide an instruction scheduling machine model to CodeGen passes.
Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
Definition: ArrayRef.h:451
unsigned getNumOperands() const
Retuns the total number of operands.
Definition: MachineInstr.h:412
Printable printReg(unsigned Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
Strategy
Strategies for selecting traces.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
const MachineInstr * MI
void verifyAnalysis() const override
verifyAnalysis() - This member can be implemented by a analysis pass to check state of analysis infor...
def_iterator def_begin(unsigned RegNo) const
ArrayRef< unsigned > getProcResourceHeights(unsigned MBBNum) const
Get an array of processor resource heights for MBB.
bool hasInstrSchedModel() const
Return true if this machine model includes an instruction-level scheduling model. ...
ArrayRef< unsigned > getProcResourceCycles(unsigned MBBNum) const
Get the scaled number of cycles used per processor resource in MBB.
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
BlockT * getHeader() const
Definition: LoopInfo.h:100
bool hasValidHeight() const
Returns true if the height resources have been computed from the trace below this block...
MachineInstr * getVRegDef(unsigned Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
virtual const MachineBasicBlock * pickTraceSucc(const MachineBasicBlock *)=0
Select the trace through a block that has the fewest instructions.
bool hasValidDepth() const
Returns true if the depth resources have been computed from the trace above this block.
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register. ...
void addLiveIns(MachineBasicBlock &MBB, const LivePhysRegs &LiveRegs)
Adds registers contained in LiveRegs to the block live-in list of MBB.
COFF::MachineTypes Machine
Definition: COFFYAML.cpp:363
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they&#39;re not in a MachineFuncti...
virtual const TargetInstrInfo * getInstrInfo() const
bool isValid() const
Definition: MCSchedule.h:127
iterator erase(iterator I)
erase - Erases an existing element identified by a valid iterator.
Definition: SparseSet.h:286
TargetInstrInfo - Interface to description of machine instruction set.
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:176
void invalidateDepth()
Invalidate depth resources when some block above this one has changed.
#define DEBUG_TYPE
void invalidate(const MachineBasicBlock *MBB)
Invalidate traces through BadMBB.
unsigned CriticalPath
Critical path length.
bool erase(const KeyT &Val)
Definition: DenseMap.h:298
MachineBasicBlock * getBlockNumbered(unsigned N) const
getBlockNumbered - MachineBasicBlocks are automatically numbered when they are inserted into the mach...
* if(!EatIfPresent(lltok::kw_thread_local)) return false
ParseOptionalThreadLocal := /*empty.
unsigned const MachineRegisterInfo * MRI
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Definition: ArrayRef.h:291
bool HasValidInstrHeights
Instruction heights have been computed. This implies hasValidHeight().
typename DenseT::iterator iterator
Definition: SparseSet.h:172
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
unsigned InstrHeight
Accumulated number of instructions in the trace below this block.
Ensemble * getEnsemble(Strategy)
Get the trace ensemble representing the given trace selection strategy.
typename DenseT::const_iterator const_iterator
Definition: SparseSet.h:173
Identify one of the processor resource kinds consumed by a particular scheduling class for the specif...
Definition: MCSchedule.h:64
void releaseMemory() override
releaseMemory() - This member can be implemented by a pass if it wants to be able to release its memo...
MachineInstrBuilder & UseMI
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:149
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator begin()
Definition: SmallVector.h:129
Summarize the scheduling resources required for an instruction of a particular scheduling class...
Definition: MCSchedule.h:110
Machine Trace Metrics
Per-basic block information that doesn&#39;t depend on the trace through the block.
bool isPredecessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a predecessor of this block.
Represent the analysis usage information of a pass.
const MachineBasicBlock * Succ
Trace successor, or NULL for the last block in the trace.
Default po_iterator_storage implementation with an internal set object.
bool atEnd() const
atEnd - return true if this iterator is equal to reg_end() on the value.
const FixedBlockInfo * getResources(const MachineBasicBlock *)
Get the fixed resource information about MBB. Compute it on demand.
unsigned Tail
The block number of the tail of the trace. (When hasValidHeight()).
void setUniverse(unsigned U)
setUniverse - Set the universe size which determines the largest key the set can hold.
Definition: SparseSet.h:156
iterator_range< pred_iterator > predecessors()
void init(const TargetSubtargetInfo *TSInfo)
Initialize the machine model for instruction scheduling.
virtual const char * getName() const =0
virtual const MachineBasicBlock * pickTracePred(const MachineBasicBlock *)=0
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
A trace represents a plausible sequence of executed basic blocks that passes through the current basi...
size_t size() const
Definition: SmallVector.h:53
const_iterator end() const
Definition: SparseSet.h:176
bool isDebugInstr() const
Definition: MachineInstr.h:999
INITIALIZE_PASS_END(RegBankSelect, DEBUG_TYPE, "Assign register bank of generic virtual registers", false, false) RegBankSelect
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
static void updatePhysDepsDownwards(const MachineInstr *UseMI, SmallVectorImpl< DataDep > &Deps, SparseSet< LiveRegUnit > &RegUnits, const TargetRegisterInfo *TRI)
bool contains(const LoopT *L) const
Return true if the specified loop is contained within this loop.
Definition: LoopInfo.h:110
MachineOperand class - Representation of each machine instruction operand.
unsigned Height
Minimum number of cycles from this instruction is issued to the end of the trace, as determined by data d...
MachineInstrBuilder & DefMI
const_iterator begin() const
Definition: SparseSet.h:175
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:381
iterator_range< po_ext_iterator< T, SetType > > post_order_ext(const T &G, SetType &S)
char & MachineTraceMetricsID
MachineTraceMetrics - This pass computes critical path and CPU resource usage in an ensemble of trace...
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:133
Per-basic block information that relates to a specific trace through the block.
bool isValid() const
isValid - returns true if this iterator is not yet at the end.
const TraceBlockInfo * getHeightResources(const MachineBasicBlock *) const
void setPreservesAll()
Set by analyses that do not transform their input at all.
unsigned getResourceFactor(unsigned ResIdx) const
Multiply the number of units consumed for a resource by this factor to normalize it relative to other...
void invalidate(const MachineBasicBlock *MBB)
Invalidate cached information about MBB.
unsigned Head
The block number of the head of the trace. (When hasValidDepth()).
unsigned getNumProcResourceKinds() const
Get the number of kinds of resources for this target.
const MCSchedClassDesc * resolveSchedClass(const MachineInstr *MI) const
Return the MCSchedClassDesc for this instruction.
static bool pushDepHeight(const DataDep &Dep, const MachineInstr &UseMI, unsigned UseHeight, MIHeightMap &Heights, const TargetSchedModel &SchedModel, const TargetInstrInfo *TII)
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:254
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
TargetSubtargetInfo - Generic base class for all target subtargets.
iterator_range< ipo_ext_iterator< T, SetType > > inverse_post_order_ext(const T &G, SetType &S)
bool insertEdge(Optional< const MachineBasicBlock *> From, const MachineBasicBlock *To)
static void getPHIDeps(const MachineInstr &UseMI, SmallVectorImpl< DataDep > &Deps, const MachineBasicBlock *Pred, const MachineRegisterInfo *MRI)
Representation of each machine instruction.
Definition: MachineInstr.h:64
static bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
unsigned getOperandNo() const
getOperandNo - Return the operand # of this MachineOperand in its MachineInstr.
pointer data()
Return a pointer to the vector's buffer, even if empty().
Definition: SmallVector.h:149
SparseSet - Fast set implementation for objects that can be identified by small unsigned keys...
Definition: SparseSet.h:124
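A minimal, self-contained sketch of the SparseSet pattern used by this pass; the element type mirrors the LiveRegUnit struct from MachineTraceMetrics.h, and the universe size is an arbitrary example:

#include "llvm/ADT/SparseSet.h"
using namespace llvm;

// Elements are keyed by a small unsigned register-unit number;
// getSparseSetIndex() tells SparseSet which key identifies each element.
struct LiveRegUnit {
  unsigned RegUnit;
  unsigned Cycle = 0;
  unsigned getSparseSetIndex() const { return RegUnit; }
  LiveRegUnit(unsigned RU) : RegUnit(RU) {}
};

void sparseSetExample(unsigned NumRegUnits) {
  SparseSet<LiveRegUnit> RegUnits;
  RegUnits.setUniverse(NumRegUnits);  // must be called before inserting
  RegUnits.insert(LiveRegUnit(5));    // O(1) insert, find, and erase
  SparseSet<LiveRegUnit>::iterator I = RegUnits.find(5);
  if (I != RegUnits.end())
    I->Cycle = 7;                     // payload fields are mutable in place
}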
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:56
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:465
INITIALIZE_PASS_BEGIN(MachineTraceMetrics, DEBUG_TYPE, "Machine Trace Metrics", false, true) INITIALIZE_PASS_END(MachineTraceMetrics
iterator find(const KeyT &Key)
find - Find an element by its key.
Definition: SparseSet.h:225
InstrCycles represents the cycle height and depth of an instruction in a trace.
iterator end()
Definition: DenseMap.h:109
static bool isExitingLoop(const MachineLoop *From, const MachineLoop *To)
bool runOnMachineFunction(MachineFunction &) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
const TraceBlockInfo * getDepthResources(const MachineBasicBlock *) const
unsigned getResourceDepth(bool Bottom) const
Return the resource depth of the top/bottom of the trace center block.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:211
bool isReg() const
isReg - Tests if this is a MO_Register operand.
unsigned getPHIDepth(const MachineInstr &PHI) const
Return the Depth of a PHI instruction in a trace center block successor.
const MCProcResourceDesc * getProcResource(unsigned PIdx) const
Get a processor resource by ID for convenience.
mop_iterator operands_begin()
Definition: MachineInstr.h:453
MachineLoop * getLoopFor(const MachineBasicBlock *BB) const
Return the innermost loop that BB lives in.
unsigned getResourceLength(ArrayRef< const MachineBasicBlock *> Extrablocks=None, ArrayRef< const MCSchedClassDesc *> ExtraInstrs=None, ArrayRef< const MCSchedClassDesc *> RemoveInstrs=None) const
Return the resource length of the trace.
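For example, a transformation weighing whether to speculate another block's instructions into the trace can ask what the resource length would become. A hedged sketch, reusing BlockTrace from the earlier sketch; ExtraBB is a hypothetical const MachineBasicBlock*:

unsigned CurLen = BlockTrace.getResourceLength();
// Resource length if ExtraBB's instructions were added to the trace.
unsigned NewLen = BlockTrace.getResourceLength({ExtraBB});
// A client might only speculate ExtraBB when NewLen stays within its budget.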
ArrayRef< unsigned > getProcResourceDepths(unsigned MBBNum) const
Get an array of processor resource depths for MBB.
unsigned InstrDepth
Accumulated number of instructions in the trace above this block.
This class implements an extremely fast bulk output stream that can only output to a stream...
Definition: raw_ostream.h:46
bool isDepInTrace(const MachineInstr &DefMI, const MachineInstr &UseMI) const
A dependence is useful if the basic block of the defining instruction is part of the trace of the use...
unsigned getInstrSlack(const MachineInstr &MI) const
Return the slack of MI.
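The relationship between slack, depth, height, and the critical path can be made concrete with a short hedged sketch (BlockTrace as above, MI a hypothetical instruction in the trace center block; this mirrors the documented meaning rather than quoting the implementation):

MachineTraceMetrics::InstrCycles Cyc = BlockTrace.getInstrCycles(MI);
unsigned PathThroughMI = Cyc.Depth + Cyc.Height;            // MI's own path length
unsigned Slack = BlockTrace.getCriticalPath() - PathThroughMI;
// Conceptually this is what getInstrSlack(MI) reports: the cycles MI can be
// delayed without lengthening the critical path.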
#define LLVM_DEBUG(X)
Definition: Debug.h:123
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:414
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1238
reg_begin/reg_end - Provide iteration support to walk over all definitions and uses of a register wit...
static bool getDataDeps(const MachineInstr &UseMI, SmallVectorImpl< DataDep > &Deps, const MachineRegisterInfo *MRI)
bool isUsefulDominator(const TraceBlockInfo &TBI) const
Assuming that this is a dominator of TBI, determine if it contains useful instruction depths...
static unsigned updatePhysDepsUpwards(const MachineInstr &MI, unsigned Height, SparseSet< LiveRegUnit > &RegUnits, const TargetSchedModel &SchedModel, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI)
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:144
Trace getTrace(const MachineBasicBlock *MBB)
Get the trace that passes through MBB.
void resize(size_type N)
Definition: SmallVector.h:351
void getAnalysisUsage(AnalysisUsage &) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.