LLVM  8.0.1
InlineSpiller.cpp
Go to the documentation of this file.
1 //===- InlineSpiller.cpp - Insert spills and restores inline --------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // The inline spiller modifies the machine function directly instead of
11 // inserting spills and restores in VirtRegMap.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "LiveRangeCalc.h"
16 #include "Spiller.h"
17 #include "SplitKit.h"
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/DenseMap.h"
20 #include "llvm/ADT/MapVector.h"
21 #include "llvm/ADT/None.h"
22 #include "llvm/ADT/STLExtras.h"
23 #include "llvm/ADT/SetVector.h"
24 #include "llvm/ADT/SmallPtrSet.h"
25 #include "llvm/ADT/SmallVector.h"
26 #include "llvm/ADT/Statistic.h"
49 #include "llvm/Config/llvm-config.h"
53 #include "llvm/Support/Compiler.h"
54 #include "llvm/Support/Debug.h"
57 #include <cassert>
58 #include <iterator>
59 #include <tuple>
60 #include <utility>
61 #include <vector>
62 
63 using namespace llvm;
64 
65 #define DEBUG_TYPE "regalloc"
66 
// Pass statistics; reported under -stats, keyed to the "regalloc" debug type.
67 STATISTIC(NumSpilledRanges, "Number of spilled live ranges");
68 STATISTIC(NumSnippets, "Number of spilled snippets");
69 STATISTIC(NumSpills, "Number of spills inserted");
70 STATISTIC(NumSpillsRemoved, "Number of spills removed");
71 STATISTIC(NumReloads, "Number of reloads inserted");
72 STATISTIC(NumReloadsRemoved, "Number of reloads removed");
73 STATISTIC(NumFolded, "Number of folded stack accesses");
74 STATISTIC(NumFoldedLoads, "Number of folded loads");
75 STATISTIC(NumRemats, "Number of rematerialized defs for spilling");
76 
// Debugging escape hatch that turns off spill hoisting entirely.
// NOTE(review): cl::opt needs llvm/Support/CommandLine.h, which does not
// appear in the visible include list -- presumably dropped by extraction.
77 static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
78  cl::desc("Disable inline spill hoisting"));
79 
80 namespace {
81 
// Helper object that records the spills inserted for a stack slot and later
// hoists mergeable spills to common dominators (see hoistAllSpills).
// NOTE(review): several member declarations referenced below (MDT, Loops,
// MRI, IPA, the StackSlotToOrigLI map, the MergeableSpillsMap MapVector type,
// and the Virt2SiblingsMap) are missing from this extracted view -- the
// doxygen line numbers jump over them. Restore from upstream before editing.
82 class HoistSpillHelper : private LiveRangeEdit::Delegate {
83  MachineFunction &MF;
84  LiveIntervals &LIS;
85  LiveStacks &LSS;
86  AliasAnalysis *AA;
89  VirtRegMap &VRM;
91  const TargetInstrInfo &TII;
92  const TargetRegisterInfo &TRI;
93  const MachineBlockFrequencyInfo &MBFI;
94 
96 
97  // Map from StackSlot to the LiveInterval of the original register.
98  // Note the LiveInterval of the original register may have been deleted
99  // after it is spilled. We keep a copy here to track the range where
100  // spills can be moved.
102 
103  // Map from pair of (StackSlot and Original VNI) to a set of spills which
104  // have the same stackslot and have equal values defined by Original VNI.
105  // These spills are mergeable and are hoist candiates.
106  using MergeableSpillsMap =
108  MergeableSpillsMap MergeableSpills;
109 
110  /// This is the map from original register to a set containing all its
111  /// siblings. To hoist a spill to another BB, we need to find out a live
112  /// sibling there and use it as the source of the new spill.
114 
// Returns true if BB is a legal/profitable place to hoist a spill of
// OrigVNI's value; LiveReg receives the sibling register live there.
115  bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
116  MachineBasicBlock &BB, unsigned &LiveReg);
117 
118  void rmRedundantSpills(
119  SmallPtrSet<MachineInstr *, 16> &Spills,
122 
123  void getVisitOrders(
124  MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
129 
130  void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI,
131  SmallPtrSet<MachineInstr *, 16> &Spills,
134 
135 public:
// Pulls every analysis the hoister needs from the owning pass.
136  HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf,
137  VirtRegMap &vrm)
138  : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
139  LSS(pass.getAnalysis<LiveStacks>()),
140  AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
141  MDT(pass.getAnalysis<MachineDominatorTree>()),
142  Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
143  MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
144  TRI(*mf.getSubtarget().getRegisterInfo()),
145  MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
146  IPA(LIS, mf.getNumBlockIDs()) {}
147 
// Record/remove spills eligible for merging; hoistAllSpills runs at the end.
148  void addToMergeableSpills(MachineInstr &Spill, int StackSlot,
149  unsigned Original);
150  bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
151  void hoistAllSpills();
152  void LRE_DidCloneVirtReg(unsigned, unsigned) override;
153 };
154 
// Spiller implementation that rewrites the machine function in place instead
// of recording spills/reloads in VirtRegMap.
// NOTE(review): the declaration of DeadDefs (a SmallVector of dead-def
// MachineInstrs, doxygen line 185) is missing from this extracted view even
// though several methods push into it -- restore from upstream before editing.
155 class InlineSpiller : public Spiller {
156  MachineFunction &MF;
157  LiveIntervals &LIS;
158  LiveStacks &LSS;
159  AliasAnalysis *AA;
162  VirtRegMap &VRM;
164  const TargetInstrInfo &TII;
165  const TargetRegisterInfo &TRI;
166  const MachineBlockFrequencyInfo &MBFI;
167 
168  // Variables that are valid during spill(), but used by multiple methods.
169  LiveRangeEdit *Edit;
170  LiveInterval *StackInt;
171  int StackSlot;
172  unsigned Original;
173 
174  // All registers to spill to StackSlot, including the main register.
175  SmallVector<unsigned, 8> RegsToSpill;
176 
177  // All COPY instructions to/from snippets.
178  // They are ignored since both operands refer to the same stack slot.
179  SmallPtrSet<MachineInstr*, 8> SnippetCopies;
180 
181  // Values that failed to remat at some point.
182  SmallPtrSet<VNInfo*, 8> UsedValues;
183 
184  // Dead defs generated during spilling.
186 
187  // Object records spills information and does the hoisting.
188  HoistSpillHelper HSpiller;
189 
190  ~InlineSpiller() override = default;
191 
192 public:
// Pulls every analysis the spiller needs from the owning pass.
193  InlineSpiller(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
194  : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
195  LSS(pass.getAnalysis<LiveStacks>()),
196  AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
197  MDT(pass.getAnalysis<MachineDominatorTree>()),
198  Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
199  MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
200  TRI(*mf.getSubtarget().getRegisterInfo()),
201  MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
202  HSpiller(pass, mf, vrm) {}
203 
204  void spill(LiveRangeEdit &) override;
205  void postOptimization() override;
206 
207 private:
208  bool isSnippet(const LiveInterval &SnipLI);
209  void collectRegsToSpill();
210 
211  bool isRegToSpill(unsigned Reg) { return is_contained(RegsToSpill, Reg); }
212 
213  bool isSibling(unsigned Reg);
214  bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
215  void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);
216 
217  void markValueUsed(LiveInterval*, VNInfo*);
218  bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
219  void reMaterializeAll();
220 
221  bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
222  bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
223  MachineInstr *LoadMI = nullptr);
224  void insertReload(unsigned VReg, SlotIndex, MachineBasicBlock::iterator MI);
225  void insertSpill(unsigned VReg, bool isKill, MachineBasicBlock::iterator MI);
226 
227  void spillAroundUses(unsigned Reg);
228  void spillAll();
229 };
230 
231 } // end anonymous namespace
232 
233 Spiller::~Spiller() = default;
234 
// Empty out-of-line definition; presumably LLVM's usual vtable-anchor idiom
// to pin the Spiller vtable to this translation unit -- confirm in Spiller.h.
235 void Spiller::anchor() {}
236 
// Factory for the concrete InlineSpiller; caller owns the returned object.
// NOTE(review): the opening signature line (doxygen line 237,
// `Spiller *llvm::createInlineSpiller(MachineFunctionPass &pass,`) is missing
// from this extracted view -- restore from upstream before editing.
238  MachineFunction &mf,
239  VirtRegMap &vrm) {
240  return new InlineSpiller(pass, mf, vrm);
241 }
242 
243 //===----------------------------------------------------------------------===//
244 // Snippets
245 //===----------------------------------------------------------------------===//
246 
247 // When spilling a virtual register, we also spill any snippets it is connected
248 // to. The snippets are small live ranges that only have a single real use,
249 // leftovers from live range splitting. Spilling them enables memory operand
250 // folding or tightens the live range around the single use.
251 //
252 // This minimizes register pressure and maximizes the store-to-load distance for
253 // spill slots which can be important in tight loops.
254 
255 /// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
256 /// otherwise return 0.
257 static unsigned isFullCopyOf(const MachineInstr &MI, unsigned Reg) {
258  if (!MI.isFullCopy())
259  return 0;
260  if (MI.getOperand(0).getReg() == Reg)
261  return MI.getOperand(1).getReg();
262  if (MI.getOperand(1).getReg() == Reg)
263  return MI.getOperand(0).getReg();
264  return 0;
265 }
266 
267 /// isSnippet - Identify if a live interval is a snippet that should be spilled.
268 /// It is assumed that SnipLI is a virtual register with the same original as
269 /// Edit->getReg().
270 bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
271  unsigned Reg = Edit->getReg();
272 
273  // A snippet is a tiny live range with only a single instruction using it
274  // besides copies to/from Reg or spills/fills. We accept:
275  //
276  // %snip = COPY %Reg / FILL fi#
277  // %snip = USE %snip
278  // %Reg = COPY %snip / SPILL %snip, fi#
279  //
280  if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
281  return false;
282 
283  MachineInstr *UseMI = nullptr;
284 
285  // Check that all uses satisfy our criteria.
// NOTE(review): the loop header line (doxygen line 286, presumably
// `for (MachineRegisterInfo::reg_instr_nodbg_iterator`) is missing from this
// extracted view -- restore from upstream before editing.
287  RI = MRI.reg_instr_nodbg_begin(SnipLI.reg),
288  E = MRI.reg_instr_nodbg_end(); RI != E; ) {
289  MachineInstr &MI = *RI++;
290 
291  // Allow copies to/from Reg.
292  if (isFullCopyOf(MI, Reg))
293  continue;
294 
295  // Allow stack slot loads.
296  int FI;
297  if (SnipLI.reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
298  continue;
299 
300  // Allow stack slot stores.
301  if (SnipLI.reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
302  continue;
303 
304  // Allow a single additional instruction.
305  if (UseMI && &MI != UseMI)
306  return false;
307  UseMI = &MI;
308  }
309  return true;
310 }
311 
312 /// collectRegsToSpill - Collect live range snippets that only have a single
313 /// real use.
// Populate RegsToSpill with the main register plus every sibling snippet
// reachable through a full copy; records the connecting copies in
// SnippetCopies so later passes can skip them.
314 void InlineSpiller::collectRegsToSpill() {
315  unsigned Reg = Edit->getReg();
316 
317  // Main register always spills.
318  RegsToSpill.assign(1, Reg);
319  SnippetCopies.clear();
320 
321  // Snippets all have the same original, so there can't be any for an original
322  // register.
323  if (Original == Reg)
324  return;
325 
// NOTE(review): the loop header line (doxygen line 326, presumably
// `for (MachineRegisterInfo::reg_instr_iterator`) is missing from this
// extracted view -- restore from upstream before editing.
327  RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end(); RI != E; ) {
328  MachineInstr &MI = *RI++;
329  unsigned SnipReg = isFullCopyOf(MI, Reg);
330  if (!isSibling(SnipReg))
331  continue;
332  LiveInterval &SnipLI = LIS.getInterval(SnipReg);
333  if (!isSnippet(SnipLI))
334  continue;
335  SnippetCopies.insert(&MI);
336  if (isRegToSpill(SnipReg))
337  continue;
338  RegsToSpill.push_back(SnipReg);
339  LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
340  ++NumSnippets;
341  }
342 }
343 
// True when Reg shares the same original register as the interval being
// spilled (i.e. it was split from the same value).
// NOTE(review): the first half of the return expression (doxygen line 345,
// presumably `return TargetRegisterInfo::isVirtualRegister(Reg) &&`) is
// missing from this extracted view -- restore from upstream before editing.
344 bool InlineSpiller::isSibling(unsigned Reg) {
346  VRM.getOriginal(Reg) == Original;
347 }
348 
349 /// It is beneficial to spill to earlier place in the same BB in case
350 /// as follows:
351 /// There is an alternative def earlier in the same MBB.
352 /// Hoist the spill as far as possible in SpillMBB. This can ease
353 /// register pressure:
354 ///
355 /// x = def
356 /// y = use x
357 /// s = copy x
358 ///
359 /// Hoisting the spill of s to immediately after the def removes the
360 /// interference between x and y:
361 ///
362 /// x = def
363 /// spill x
364 /// y = use killed x
365 ///
366 /// This hoist only helps when the copy kills its source.
367 ///
// Hoist the spill of CopyMI's source value to immediately after its def in
// the same MBB (only when the copy kills its source), merging the value into
// the stack interval and dropping now-redundant downstream spills.
368 bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
369  MachineInstr &CopyMI) {
370  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
371 #ifndef NDEBUG
372  VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
373  assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
374 #endif
375 
376  unsigned SrcReg = CopyMI.getOperand(1).getReg();
377  LiveInterval &SrcLI = LIS.getInterval(SrcReg);
378  VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx);
379  LiveQueryResult SrcQ = SrcLI.Query(Idx);
380  MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def);
// Only hoist within a single block, and only when the copy kills its source.
381  if (DefMBB != CopyMI.getParent() || !SrcQ.isKill())
382  return false;
383 
384  // Conservatively extend the stack slot range to the range of the original
385  // value. We may be able to do better with stack slot coloring by being more
386  // careful here.
387  assert(StackInt && "No stack slot assigned yet.");
388  LiveInterval &OrigLI = LIS.getInterval(Original);
389  VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
390  StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
391  LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
392  << *StackInt << '\n');
393 
394  // We are going to spill SrcVNI immediately after its def, so clear out
395  // any later spills of the same value.
396  eliminateRedundantSpills(SrcLI, SrcVNI);
397 
398  MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def);
// NOTE(review): the declaration of MII (doxygen line 399, presumably
// `MachineBasicBlock::iterator MII;`) is missing from this extracted view.
400  if (SrcVNI->isPHIDef())
401  MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin());
402  else {
403  MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def);
404  assert(DefMI && "Defining instruction disappeared");
405  MII = DefMI;
406  ++MII;
407  }
408  // Insert spill without kill flag immediately after def.
409  TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
410  MRI.getRegClass(SrcReg), &TRI);
411  --MII; // Point to store instruction.
412  LIS.InsertMachineInstrInMaps(*MII);
413  LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);
414 
415  HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
416  ++NumSpills;
417  return true;
418 }
419 
420 /// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
421 /// redundant spills of this value in SLI.reg and sibling copies.
// Worklist walk over SLI.reg and its sibling copies: merge each visited
// value into the stack interval and neutralize spills of values already known
// to be on the stack (by rewriting them to KILL for later dead-def cleanup).
422 void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
423  assert(VNI && "Missing value");
// NOTE(review): the WorkList declaration (doxygen line 424, presumably
// `SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;`) is missing
// from this extracted view -- restore from upstream before editing.
425  WorkList.push_back(std::make_pair(&SLI, VNI));
426  assert(StackInt && "No stack slot assigned yet.");
427 
428  do {
429  LiveInterval *LI;
430  std::tie(LI, VNI) = WorkList.pop_back_val();
431  unsigned Reg = LI->reg;
432  LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
433  << VNI->def << " in " << *LI << '\n');
434 
435  // Regs to spill are taken care of.
436  if (isRegToSpill(Reg))
437  continue;
438 
439  // Add all of VNI's live range to StackInt.
440  StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
441  LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');
442 
443  // Find all spills and copies of VNI.
// NOTE(review): the loop header line (doxygen line 444, presumably
// `for (MachineRegisterInfo::use_instr_nodbg_iterator`) is missing here.
445  UI = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
446  UI != E; ) {
447  MachineInstr &MI = *UI++;
448  if (!MI.isCopy() && !MI.mayStore())
449  continue;
450  SlotIndex Idx = LIS.getInstructionIndex(MI);
451  if (LI->getVNInfoAt(Idx) != VNI)
452  continue;
453 
454  // Follow sibling copies down the dominator tree.
455  if (unsigned DstReg = isFullCopyOf(MI, Reg)) {
456  if (isSibling(DstReg)) {
457  LiveInterval &DstLI = LIS.getInterval(DstReg);
458  VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
459  assert(DstVNI && "Missing defined value");
460  assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
461  WorkList.push_back(std::make_pair(&DstLI, DstVNI));
462  }
463  continue;
464  }
465 
466  // Erase spills.
467  int FI;
468  if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
469  LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
470  // eliminateDeadDefs won't normally remove stores, so switch opcode.
471  MI.setDesc(TII.get(TargetOpcode::KILL));
472  DeadDefs.push_back(&MI);
473  ++NumSpillsRemoved;
474  if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
475  --NumSpills;
476  }
477  }
478  } while (!WorkList.empty());
479 }
480 
481 //===----------------------------------------------------------------------===//
482 // Rematerialization
483 //===----------------------------------------------------------------------===//
484 
485 /// markValueUsed - Remember that VNI failed to rematerialize, so its defining
486 /// instruction cannot be eliminated. See through snippet copies
// Record that VNI failed to rematerialize (so its defining instruction must
// stay), propagating the mark transitively through PHI predecessors and
// snippet copies via a worklist.
487 void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
// NOTE(review): the WorkList declaration (doxygen line 488, presumably
// `SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;`) is missing
// from this extracted view -- restore from upstream before editing.
489  WorkList.push_back(std::make_pair(LI, VNI));
490  do {
491  std::tie(LI, VNI) = WorkList.pop_back_val();
// UsedValues doubles as the visited set; skip values already marked.
492  if (!UsedValues.insert(VNI).second)
493  continue;
494 
495  if (VNI->isPHIDef()) {
496  MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
497  for (MachineBasicBlock *P : MBB->predecessors()) {
498  VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P));
499  if (PVNI)
500  WorkList.push_back(std::make_pair(LI, PVNI));
501  }
502  continue;
503  }
504 
505  // Follow snippet copies.
506  MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
507  if (!SnippetCopies.count(MI))
508  continue;
509  LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
510  assert(isRegToSpill(SnipLI.reg) && "Unexpected register in copy");
511  VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
512  assert(SnipVNI && "Snippet undefined before copy");
513  WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
514  } while (!WorkList.empty());
515 }
516 
517 /// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
// Try to rematerialize VirtReg's value just before MI instead of reloading
// it from the stack slot. Returns true when MI no longer needs a reload
// (remat succeeded, a load was folded, or the use turned out to be undef).
518 bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
519  // Analyze instruction
// NOTE(review): the declaration of Ops (doxygen line 520, presumably
// `SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;`) is missing
// from this extracted view -- restore from upstream before editing.
521  MIBundleOperands::VirtRegInfo RI =
522  MIBundleOperands(MI).analyzeVirtReg(VirtReg.reg, &Ops);
523 
524  if (!RI.Reads)
525  return false;
526 
527  SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
528  VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());
529 
// No live value at the use: mark the operands undef instead of reloading.
530  if (!ParentVNI) {
531  LLVM_DEBUG(dbgs() << "\tadding <undef> flags: ");
532  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
533  MachineOperand &MO = MI.getOperand(i);
534  if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg)
535  MO.setIsUndef();
536  }
537  LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI);
538  return true;
539  }
540 
// Snippet copies will be deleted wholesale; don't remat for them.
541  if (SnippetCopies.count(&MI))
542  return false;
543 
544  LiveInterval &OrigLI = LIS.getInterval(Original);
545  VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);
546  LiveRangeEdit::Remat RM(ParentVNI);
547  RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);
548 
549  if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {
550  markValueUsed(&VirtReg, ParentVNI);
551  LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
552  return false;
553  }
554 
555  // If the instruction also writes VirtReg.reg, it had better not require the
556  // same register for uses and defs.
557  if (RI.Tied) {
558  markValueUsed(&VirtReg, ParentVNI);
559  LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
560  return false;
561  }
562 
563  // Before rematerializing into a register for a single instruction, try to
564  // fold a load into the instruction. That avoids allocating a new register.
565  if (RM.OrigMI->canFoldAsLoad() &&
566  foldMemoryOperand(Ops, RM.OrigMI)) {
567  Edit->markRematerialized(RM.ParentVNI);
568  ++NumFoldedLoads;
569  return true;
570  }
571 
572  // Allocate a new register for the remat.
573  unsigned NewVReg = Edit->createFrom(Original);
574 
575  // Finally we can rematerialize OrigMI before MI.
576  SlotIndex DefIdx =
577  Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);
578 
579  // We take the DebugLoc from MI, since OrigMI may be attributed to a
580  // different source location.
581  auto *NewMI = LIS.getInstructionFromIndex(DefIdx);
582  NewMI->setDebugLoc(MI.getDebugLoc());
583 
584  (void)DefIdx;
585  LLVM_DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
586  << *LIS.getInstructionFromIndex(DefIdx));
587 
588  // Replace operands
589  for (const auto &OpPair : Ops) {
590  MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
591  if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) {
592  MO.setReg(NewVReg);
593  MO.setIsKill();
594  }
595  }
596  LLVM_DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n');
597 
598  ++NumRemats;
599  return true;
600 }
601 
602 /// reMaterializeAll - Try to rematerialize as many uses as possible,
603 /// and trim the live ranges after.
604 void InlineSpiller::reMaterializeAll() {
// Bail out early if the live-range edit has nothing rematerializable.
605  if (!Edit->anyRematerializable(AA))
606  return;
607 
608  UsedValues.clear();
609 
610  // Try to remat before all uses of snippets.
611  bool anyRemat = false;
612  for (unsigned Reg : RegsToSpill) {
613  LiveInterval &LI = LIS.getInterval(Reg);
// NOTE(review): the loop header line (doxygen line 614, presumably
// `for (MachineRegisterInfo::reg_bundle_iterator`) is missing from this
// extracted view -- restore from upstream before editing.
615  RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
616  RegI != E; ) {
617  MachineInstr &MI = *RegI++;
618 
619  // Debug values are not allowed to affect codegen.
620  if (MI.isDebugValue())
621  continue;
622 
623  assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
624  "instruction that isn't a DBG_VALUE");
625 
626  anyRemat |= reMaterializeFor(LI, MI);
627  }
628  }
629  if (!anyRemat)
630  return;
631 
632  // Remove any values that were completely rematted.
633  for (unsigned Reg : RegsToSpill) {
634  LiveInterval &LI = LIS.getInterval(Reg);
635  for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end();
636  I != E; ++I) {
637  VNInfo *VNI = *I;
638  if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
639  continue;
640  MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
641  MI->addRegisterDead(Reg, &TRI);
642  if (!MI->allDefsAreDead())
643  continue;
644  LLVM_DEBUG(dbgs() << "All defs dead: " << *MI);
645  DeadDefs.push_back(MI);
646  }
647  }
648 
649  // Eliminate dead code after remat. Note that some snippet copies may be
650  // deleted here.
651  if (DeadDefs.empty())
652  return;
653  LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
654  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
655 
656  // LiveRangeEdit::eliminateDeadDef is used to remove dead define instructions
657  // after rematerialization. To remove a VNI for a vreg from its LiveInterval,
658  // LiveIntervals::removeVRegDefAt is used. However, after non-PHI VNIs are all
659  // removed, PHI VNI are still left in the LiveInterval.
660  // So to get rid of unused reg, we need to check whether it has non-dbg
661  // reference instead of whether it has non-empty interval.
// In-place compaction of RegsToSpill: keep only registers still referenced.
662  unsigned ResultPos = 0;
663  for (unsigned Reg : RegsToSpill) {
664  if (MRI.reg_nodbg_empty(Reg)) {
665  Edit->eraseVirtReg(Reg);
666  continue;
667  }
668 
669  assert(LIS.hasInterval(Reg) &&
670  (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
671  "Empty and not used live-range?!");
672 
673  RegsToSpill[ResultPos++] = Reg;
674  }
675  RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
676  LLVM_DEBUG(dbgs() << RegsToSpill.size()
677  << " registers to spill after remat.\n");
678 }
679 
680 //===----------------------------------------------------------------------===//
681 // Spilling
682 //===----------------------------------------------------------------------===//
683 
684 /// If MI is a load or store of StackSlot, it can be removed.
685 bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
686  int FI = 0;
687  unsigned InstrReg = TII.isLoadFromStackSlot(*MI, FI);
688  bool IsLoad = InstrReg;
689  if (!IsLoad)
690  InstrReg = TII.isStoreToStackSlot(*MI, FI);
691 
692  // We have a stack access. Is it the right register and slot?
693  if (InstrReg != Reg || FI != StackSlot)
694  return false;
695 
696  if (!IsLoad)
697  HSpiller.rmFromMergeableSpills(*MI, StackSlot);
698 
699  LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI);
700  LIS.RemoveMachineInstrFromMaps(*MI);
701  MI->eraseFromParent();
702 
703  if (IsLoad) {
704  ++NumReloadsRemoved;
705  --NumReloads;
706  } else {
707  ++NumSpillsRemoved;
708  --NumSpills;
709  }
710 
711  return true;
712 }
713 
714 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
716 // Dump the range of instructions from B to E with their slot indexes.
// NOTE(review): the function signature lines (doxygen lines 715/717-718,
// presumably `LLVM_DUMP_METHOD static void dumpMachineInstrRangeWithSlotIndex(
// MachineBasicBlock::iterator B, MachineBasicBlock::iterator E,`) are missing
// from this extracted view -- restore from upstream before editing.
719  LiveIntervals const &LIS,
720  const char *const header,
721  unsigned VReg =0) {
// Single-instruction ranges print on one line; multi-line otherwise.
722  char NextLine = '\n';
723  char SlotIndent = '\t';
724 
725  if (std::next(B) == E) {
726  NextLine = ' ';
727  SlotIndent = ' ';
728  }
729 
730  dbgs() << '\t' << header << ": " << NextLine;
731 
732  for (MachineBasicBlock::iterator I = B; I != E; ++I) {
733  SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot();
734 
735  // If a register was passed in and this instruction has it as a
736  // destination that is marked as an early clobber, print the
737  // early-clobber slot index.
738  if (VReg) {
739  MachineOperand *MO = I->findRegisterDefOperand(VReg);
740  if (MO && MO->isEarlyClobber())
741  Idx = Idx.getRegSlot(true);
742  }
743 
744  dbgs() << SlotIndent << Idx << '\t' << *I;
745  }
746 }
747 #endif
748 
749 /// foldMemoryOperand - Try folding stack slot references in Ops into their
750 /// instructions.
751 ///
752 /// @param Ops Operand indices from analyzeVirtReg().
753 /// @param LoadMI Load instruction to use instead of stack slot when non-null.
754 /// @return True on success.
// Ask the target to fold the stack slot (or LoadMI) directly into the
// instruction referenced by Ops, then repair LiveIntervals bookkeeping and
// the spill/reload statistics. Order of the cleanup steps below matters;
// keep the code byte-identical and change only with care.
755 bool InlineSpiller::
756 foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
757  MachineInstr *LoadMI) {
758  if (Ops.empty())
759  return false;
760  // Don't attempt folding in bundles.
761  MachineInstr *MI = Ops.front().first;
762  if (Ops.back().first != MI || MI->isBundled())
763  return false;
764 
765  bool WasCopy = MI->isCopy();
766  unsigned ImpReg = 0;
767 
768  // Spill subregs if the target allows it.
769  // We always want to spill subregs for stackmap/patchpoint pseudos.
770  bool SpillSubRegs = TII.isSubregFoldable() ||
771  MI->getOpcode() == TargetOpcode::STATEPOINT ||
772  MI->getOpcode() == TargetOpcode::PATCHPOINT ||
773  MI->getOpcode() == TargetOpcode::STACKMAP;
774 
775  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
776  // operands.
777  SmallVector<unsigned, 8> FoldOps;
778  for (const auto &OpPair : Ops) {
779  unsigned Idx = OpPair.second;
780  assert(MI == OpPair.first && "Instruction conflict during operand folding");
781  MachineOperand &MO = MI->getOperand(Idx);
// Remember the implicit operand's register so it can be stripped afterwards.
782  if (MO.isImplicit()) {
783  ImpReg = MO.getReg();
784  continue;
785  }
786 
787  if (!SpillSubRegs && MO.getSubReg())
788  return false;
789  // We cannot fold a load instruction into a def.
790  if (LoadMI && MO.isDef())
791  return false;
792  // Tied use operands should not be passed to foldMemoryOperand.
793  if (!MI->isRegTiedToDefOperand(Idx))
794  FoldOps.push_back(Idx);
795  }
796 
797  // If we only have implicit uses, we won't be able to fold that.
798  // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try!
799  if (FoldOps.empty())
800  return false;
801 
// MIS tracks any instructions the target inserts around MI during folding.
802  MachineInstrSpan MIS(MI);
803 
804  MachineInstr *FoldMI =
805  LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
806  : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS);
807  if (!FoldMI)
808  return false;
809 
810  // Remove LIS for any dead defs in the original MI not in FoldMI.
811  for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
812  if (!MO->isReg())
813  continue;
814  unsigned Reg = MO->getReg();
815  if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) ||
816  MRI.isReserved(Reg)) {
817  continue;
818  }
819  // Skip non-Defs, including undef uses and internal reads.
820  if (MO->isUse())
821  continue;
822  MIBundleOperands::PhysRegInfo RI =
823  MIBundleOperands(*FoldMI).analyzePhysReg(Reg, &TRI);
824  if (RI.FullyDefined)
825  continue;
826  // FoldMI does not define this physreg. Remove the LI segment.
827  assert(MO->isDead() && "Cannot fold physreg def");
828  SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
829  LIS.removePhysRegDefAt(Reg, Idx);
830  }
831 
// If the instruction being replaced was itself a tracked spill, unregister it.
832  int FI;
833  if (TII.isStoreToStackSlot(*MI, FI) &&
834  HSpiller.rmFromMergeableSpills(*MI, FI))
835  --NumSpills;
836  LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
837  MI->eraseFromParent();
838 
839  // Insert any new instructions other than FoldMI into the LIS maps.
840  assert(!MIS.empty() && "Unexpected empty span of instructions!");
841  for (MachineInstr &MI : MIS)
842  if (&MI != FoldMI)
843  LIS.InsertMachineInstrInMaps(MI);
844 
845  // TII.foldMemoryOperand may have left some implicit operands on the
846  // instruction. Strip them.
847  if (ImpReg)
848  for (unsigned i = FoldMI->getNumOperands(); i; --i) {
849  MachineOperand &MO = FoldMI->getOperand(i - 1);
850  if (!MO.isReg() || !MO.isImplicit())
851  break;
852  if (MO.getReg() == ImpReg)
853  FoldMI->RemoveOperand(i - 1);
854  }
855 
856  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
857  "folded"));
858 
// A folded COPY is a spill when operand 0 (the def) was folded, otherwise a
// reload; anything else counts as a generic folded stack access.
859  if (!WasCopy)
860  ++NumFolded;
861  else if (Ops.front().second == 0) {
862  ++NumSpills;
863  HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
864  } else
865  ++NumReloads;
866  return true;
867 }
868 
// Insert a reload of NewVReg from StackSlot immediately before MI and record
// the new instruction(s) in LiveIntervals.
// NOTE(review): the final parameter line (doxygen line 871, presumably
// `MachineBasicBlock::iterator MI) {`) and the LLVM_DEBUG dump line
// (doxygen line 880) are missing from this extracted view -- restore from
// upstream before editing.
869 void InlineSpiller::insertReload(unsigned NewVReg,
870  SlotIndex Idx,
872  MachineBasicBlock &MBB = *MI->getParent();
873 
874  MachineInstrSpan MIS(MI);
875  TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
876  MRI.getRegClass(NewVReg), &TRI);
877 
878  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);
879 
881  NewVReg));
882  ++NumReloads;
883 }
884 
885 /// Check if \p Def fully defines a VReg with an undefined value.
886 /// If that's the case, that means the value of VReg is actually
887 /// not relevant.
888 static bool isFullUndefDef(const MachineInstr &Def) {
889  if (!Def.isImplicitDef())
890  return false;
891  assert(Def.getNumOperands() == 1 &&
892  "Implicit def with more than one definition");
893  // We can say that the VReg defined by Def is undef, only if it is
894  // fully defined by Def. Otherwise, some of the lanes may not be
895  // undef and the value of the VReg matters.
896  return !Def.getOperand(0).getSubReg();
897 }
898 
899 /// insertSpill - Insert a spill of NewVReg after MI.
/// insertSpill - Insert a spill of NewVReg after MI.
// NOTE(review): the final parameter line (doxygen line 901, presumably
// `MachineBasicBlock::iterator MI) {`) is missing from this extracted view
// -- restore from upstream before editing.
900 void InlineSpiller::insertSpill(unsigned NewVReg, bool isKill,
902  MachineBasicBlock &MBB = *MI->getParent();
903 
904  MachineInstrSpan MIS(MI);
905  bool IsRealSpill = true;
906  if (isFullUndefDef(*MI)) {
907  // Don't spill undef value.
908  // Anything works for undef, in particular keeping the memory
909  // uninitialized is a viable option and it saves code size and
910  // run time.
911  BuildMI(MBB, std::next(MI), MI->getDebugLoc(), TII.get(TargetOpcode::KILL))
912  .addReg(NewVReg, getKillRegState(isKill));
913  IsRealSpill = false;
914  } else
915  TII.storeRegToStackSlot(MBB, std::next(MI), NewVReg, isKill, StackSlot,
916  MRI.getRegClass(NewVReg), &TRI);
917 
918  LIS.InsertMachineInstrRangeInMaps(std::next(MI), MIS.end());
919 
920  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(std::next(MI), MIS.end(), LIS,
921  "spill"));
922  ++NumSpills;
// Undef "spills" (rewritten to KILL) are not candidates for hoist-merging.
923  if (IsRealSpill)
924  HSpiller.addToMergeableSpills(*std::next(MI), StackSlot, Original);
925 }
926 
 927 /// spillAroundUses - insert spill code around each use of Reg.
 928 void InlineSpiller::spillAroundUses(unsigned Reg) {
 929  LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n');
 930  LiveInterval &OldLI = LIS.getInterval(Reg);
 931 
 932  // Iterate over instructions using Reg.
 // NOTE(review): the iterator declaration line is elided in this rendering;
 // RegI/E presumably come from MRI's reg_bundle use/def chain for Reg —
 // confirm against the upstream source.
 934  RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
 935  RegI != E; ) {
 // Post-increment before touching MI: MI may be erased or rewritten below,
 // which would otherwise invalidate the iterator.
 936  MachineInstr *MI = &*(RegI++);
 937 
 938  // Debug values are not allowed to affect codegen.
 939  if (MI->isDebugValue()) {
 940  // Modify DBG_VALUE now that the value is in a spill slot.
 941  MachineBasicBlock *MBB = MI->getParent();
 942  LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << *MI);
 943  buildDbgValueForSpill(*MBB, MI, *MI, StackSlot);
 944  MBB->erase(MI);
 945  continue;
 946  }
 947 
 948  assert(!MI->isDebugInstr() && "Did not expect to find a use in debug "
 949  "instruction that isn't a DBG_VALUE");
 950 
 951  // Ignore copies to/from snippets. We'll delete them.
 952  if (SnippetCopies.count(MI))
 953  continue;
 954 
 955  // Stack slot accesses may coalesce away.
 956  if (coalesceStackAccess(MI, Reg))
 957  continue;
 958 
 959  // Analyze instruction.
 // NOTE(review): the declaration of Ops (a vector of (MachineInstr*, operand
 // index) pairs filled by analyzeVirtReg) is elided in this rendering.
 961  MIBundleOperands::VirtRegInfo RI =
 962  MIBundleOperands(*MI).analyzeVirtReg(Reg, &Ops);
 963 
 964  // Find the slot index where this instruction reads and writes OldLI.
 965  // This is usually the def slot, except for tied early clobbers.
 966  SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
 967  if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
 968  if (SlotIndex::isSameInstr(Idx, VNI->def))
 969  Idx = VNI->def;
 970 
 971  // Check for a sibling copy.
 972  unsigned SibReg = isFullCopyOf(*MI, Reg);
 973  if (SibReg && isSibling(SibReg)) {
 974  // This may actually be a copy between snippets.
 975  if (isRegToSpill(SibReg)) {
 976  LLVM_DEBUG(dbgs() << "Found new snippet copy: " << *MI);
 977  SnippetCopies.insert(MI);
 978  continue;
 979  }
 980  if (RI.Writes) {
 981  if (hoistSpillInsideBB(OldLI, *MI)) {
 982  // This COPY is now dead, the value is already in the stack slot.
 983  MI->getOperand(0).setIsDead();
 984  DeadDefs.push_back(MI);
 985  continue;
 986  }
 987  } else {
 988  // This is a reload for a sib-reg copy. Drop spills downstream.
 989  LiveInterval &SibLI = LIS.getInterval(SibReg);
 990  eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
 991  // The COPY will fold to a reload below.
 992  }
 993  }
 994 
 995  // Attempt to fold memory ops.
 996  if (foldMemoryOperand(Ops))
 997  continue;
 998 
 999  // Create a new virtual register for spill/fill.
 1000  // FIXME: Infer regclass from instruction alone.
 // The fresh vreg lives only across this single instruction: a reload is
 // inserted before it (if read) and a spill after it (if written).
 1001  unsigned NewVReg = Edit->createFrom(Reg);
 1002 
 1003  if (RI.Reads)
 1004  insertReload(NewVReg, Idx, MI);
 1005 
 1006  // Rewrite instruction operands.
 1007  bool hasLiveDef = false;
 1008  for (const auto &OpPair : Ops) {
 1009  MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
 1010  MO.setReg(NewVReg);
 1011  if (MO.isUse()) {
 // A use that is not tied to a def kills NewVReg here, since the vreg's
 // live range ends at this instruction.
 1012  if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
 1013  MO.setIsKill();
 1014  } else {
 1015  if (!MO.isDead())
 1016  hasLiveDef = true;
 1017  }
 1018  }
 1019  LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI << '\n');
 1020 
 1021  // FIXME: Use a second vreg if instruction has no tied ops.
 // Only spill after the instruction if it produced a live (non-dead) def.
 1022  if (RI.Writes)
 1023  if (hasLiveDef)
 1024  insertSpill(NewVReg, true, MI);
 1025  }
 1026 }
1027 
 1028 /// spillAll - Spill all registers remaining after rematerialization.
 1029 void InlineSpiller::spillAll() {
 1030  // Update LiveStacks now that we are committed to spilling.
 1031  if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
 // First spill of Original: allocate the slot and create its stack interval
 // with a single value number that all spilled ranges merge into.
 1032  StackSlot = VRM.assignVirt2StackSlot(Original);
 1033  StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
 1034  StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
 1035  } else
 1036  StackInt = &LSS.getInterval(StackSlot);
 1037 
 // The register being edited may be a split child of Original; map it to the
 // same shared stack slot.
 1038  if (Original != Edit->getReg())
 1039  VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);
 1040 
 1041  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
 1042  for (unsigned Reg : RegsToSpill)
 1043  StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
 1044  StackInt->getValNumInfo(0));
 1045  LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');
 1046 
 1047  // Spill around uses of all RegsToSpill.
 1048  for (unsigned Reg : RegsToSpill)
 1049  spillAroundUses(Reg);
 1050 
 1051  // Hoisted spills may cause dead code.
 1052  if (!DeadDefs.empty()) {
 1053  LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
 1054  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
 1055  }
 1056 
 1057  // Finally delete the SnippetCopies.
 1058  for (unsigned Reg : RegsToSpill) {
 // NOTE(review): the reg_instr iterator declaration is elided in this
 // rendering; RI/E presumably iterate MRI's reg_instr chain for Reg.
 1060  RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end();
 1061  RI != E; ) {
 // Advance the iterator before erasing MI to keep it valid.
 1062  MachineInstr &MI = *(RI++);
 1063  assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");
 1064  // FIXME: Do this with a LiveRangeEdit callback.
 1065  LIS.RemoveMachineInstrFromMaps(MI);
 1066  MI.eraseFromParent();
 1067  }
 1068  }
 1069 
 1070  // Delete all spilled registers.
 1071  for (unsigned Reg : RegsToSpill)
 1072  Edit->eraseVirtReg(Reg);
 1073 }
1074 
 1075 void InlineSpiller::spill(LiveRangeEdit &edit) {
 1076  ++NumSpilledRanges;
 1077  Edit = &edit;
 // NOTE(review): the first half of this assert (checking that edit.getReg()
 // is not a stack slot) is elided in this rendering — confirm upstream.
 1079  && "Trying to spill a stack slot.");
 1080  // Share a stack slot among all descendants of Original.
 1081  Original = VRM.getOriginal(edit.getReg());
 1082  StackSlot = VRM.getStackSlot(Original);
 1083  StackInt = nullptr;
 1084 
 1085  LLVM_DEBUG(dbgs() << "Inline spilling "
 1086  << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
 1087  << ':' << edit.getParent() << "\nFrom original "
 1088  << printReg(Original) << '\n');
 1089  assert(edit.getParent().isSpillable() &&
 1090  "Attempting to spill already spilled value.");
 1091  assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");
 1092 
 // Try rematerialization first; values that can be recomputed need no slot.
 1093  collectRegsToSpill();
 1094  reMaterializeAll();
 1095 
 1096  // Remat may handle everything.
 1097  if (!RegsToSpill.empty())
 1098  spillAll();
 1099 
 1100  Edit->calculateRegClassAndHint(MF, Loops, MBFI);
 1101 }
1102 
1103 /// Optimizations after all the reg selections and spills are done.
1104 void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); }
1105 
1106 /// When a spill is inserted, add the spill to MergeableSpills map.
1107 void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
1108  unsigned Original) {
1109  BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
1110  LiveInterval &OrigLI = LIS.getInterval(Original);
1111  // save a copy of LiveInterval in StackSlotToOrigLI because the original
1112  // LiveInterval may be cleared after all its references are spilled.
1113  if (StackSlotToOrigLI.find(StackSlot) == StackSlotToOrigLI.end()) {
1114  auto LI = llvm::make_unique<LiveInterval>(OrigLI.reg, OrigLI.weight);
1115  LI->assign(OrigLI, Allocator);
1116  StackSlotToOrigLI[StackSlot] = std::move(LI);
1117  }
1118  SlotIndex Idx = LIS.getInstructionIndex(Spill);
1119  VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot());
1120  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1121  MergeableSpills[MIdx].insert(&Spill);
1122 }
1123 
1124 /// When a spill is removed, remove the spill from MergeableSpills map.
1125 /// Return true if the spill is removed successfully.
1126 bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
1127  int StackSlot) {
1128  auto It = StackSlotToOrigLI.find(StackSlot);
1129  if (It == StackSlotToOrigLI.end())
1130  return false;
1131  SlotIndex Idx = LIS.getInstructionIndex(Spill);
1132  VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot());
1133  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1134  return MergeableSpills[MIdx].erase(&Spill);
1135 }
1136 
1137 /// Check BB to see if it is a possible target BB to place a hoisted spill,
1138 /// i.e., there should be a living sibling of OrigReg at the insert point.
1139 bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
1140  MachineBasicBlock &BB, unsigned &LiveReg) {
1141  SlotIndex Idx;
1142  unsigned OrigReg = OrigLI.reg;
1143  MachineBasicBlock::iterator MI = IPA.getLastInsertPointIter(OrigLI, BB);
1144  if (MI != BB.end())
1145  Idx = LIS.getInstructionIndex(*MI);
1146  else
1147  Idx = LIS.getMBBEndIdx(&BB).getPrevSlot();
1148  SmallSetVector<unsigned, 16> &Siblings = Virt2SiblingsMap[OrigReg];
1149  assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI");
1150 
1151  for (auto const SibReg : Siblings) {
1152  LiveInterval &LI = LIS.getInterval(SibReg);
1153  VNInfo *VNI = LI.getVNInfoAt(Idx);
1154  if (VNI) {
1155  LiveReg = SibReg;
1156  return true;
1157  }
1158  }
1159  return false;
1160 }
1161 
 1162 /// Remove redundant spills in the same BB. Save those redundant spills in
 1163 /// SpillsToRm, and save the spill to keep and its BB in SpillBBToSpill map.
 1164 void HoistSpillHelper::rmRedundantSpills(
 // NOTE(review): the first and third parameter lines (the Spills set and the
 // SpillBBToSpill map) are elided in this rendering — confirm upstream.
 1166  SmallVectorImpl<MachineInstr *> &SpillsToRm,
 1168  // For each spill saw, check SpillBBToSpill[] and see if its BB already has
 1169  // another spill inside. If a BB contains more than one spill, only keep the
 1170  // earlier spill with smaller SlotIndex.
 1171  for (const auto CurrentSpill : Spills) {
 1172  MachineBasicBlock *Block = CurrentSpill->getParent();
 1173  MachineDomTreeNode *Node = MDT.getBase().getNode(Block);
 1174  MachineInstr *PrevSpill = SpillBBToSpill[Node];
 1175  if (PrevSpill) {
 // Two spills in the same block: keep the one with the smaller SlotIndex
 // and mark the later one for removal.
 1176  SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);
 1177  SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);
 1178  MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
 1179  MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
 1180  SpillsToRm.push_back(SpillToRm);
 1181  SpillBBToSpill[MDT.getBase().getNode(Block)] = SpillToKeep;
 1182  } else {
 1183  SpillBBToSpill[MDT.getBase().getNode(Block)] = CurrentSpill;
 1184  }
 1185  }
 // Drop all spills marked redundant from the working set.
 1186  for (const auto SpillToRm : SpillsToRm)
 1187  Spills.erase(SpillToRm);
 1188 }
1189 
 1190 /// Starting from \p Root find a top-down traversal order of the dominator
 1191 /// tree to visit all basic blocks containing the elements of \p Spills.
 1192 /// Redundant spills will be found and put into \p SpillsToRm at the same
 1193 /// time. \p SpillBBToSpill will be populated as part of the process and
 1194 /// maps a basic block to the first store occurring in the basic block.
 1195 /// \post SpillsToRm.union(Spills\@post) == Spills\@pre
 1196 void HoistSpillHelper::getVisitOrders(
 // NOTE(review): several parameter lines (Root, Spills, Orders, SpillsToKeep,
 // SpillBBToSpill) and the local declarations of WorkSet and NodesOnPath are
 // elided in this rendering — confirm against the upstream source.
 1199  SmallVectorImpl<MachineInstr *> &SpillsToRm,
 1202  // The set contains all the possible BB nodes to which we may hoist
 1203  // original spills.
 1205  // Save the BB nodes on the path from the first BB node containing
 1206  // non-redundant spill to the Root node.
 1208  // All the spills to be hoisted must originate from a single def instruction
 1209  // to the OrigReg. It means the def instruction should dominate all the spills
 1210  // to be hoisted. We choose the BB where the def instruction is located as
 1211  // the Root.
 1212  MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom();
 1213  // For every node on the dominator tree with spill, walk up on the dominator
 1214  // tree towards the Root node until it is reached. If there is other node
 1215  // containing spill in the middle of the path, the previous spill saw will
 1216  // be redundant and the node containing it will be removed. All the nodes on
 1217  // the path starting from the first node with non-redundant spill to the Root
 1218  // node will be added to the WorkSet, which will contain all the possible
 1219  // locations where spills may be hoisted to after the loop below is done.
 1220  for (const auto Spill : Spills) {
 1221  MachineBasicBlock *Block = Spill->getParent();
 1222  MachineDomTreeNode *Node = MDT[Block];
 1223  MachineInstr *SpillToRm = nullptr;
 1224  while (Node != RootIDomNode) {
 1225  // If Node dominates Block, and it already contains a spill, the spill in
 1226  // Block will be redundant.
 1227  if (Node != MDT[Block] && SpillBBToSpill[Node]) {
 1228  SpillToRm = SpillBBToSpill[MDT[Block]];
 1229  break;
 1230  /// If we see the Node already in WorkSet, the path from the Node to
 1231  /// the Root node must already be traversed by another spill.
 1232  /// Then no need to repeat.
 1233  } else if (WorkSet.count(Node)) {
 1234  break;
 1235  } else {
 1236  NodesOnPath.insert(Node);
 1237  }
 1238  Node = Node->getIDom();
 1239  }
 1240  if (SpillToRm) {
 1241  SpillsToRm.push_back(SpillToRm);
 1242  } else {
 1243  // Add a BB containing the original spills to SpillsToKeep -- i.e.,
 1244  // set the initial status before hoisting start. The value of BBs
 1245  // containing original spills is set to 0, in order to descriminate
 1246  // with BBs containing hoisted spills which will be inserted to
 1247  // SpillsToKeep later during hoisting.
 1248  SpillsToKeep[MDT[Block]] = 0;
 1249  WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end());
 1250  }
 // NodesOnPath is reused across spills; reset it for the next iteration.
 1251  NodesOnPath.clear();
 1252  }
 1253 
 1254  // Sort the nodes in WorkSet in top-down order and save the nodes
 1255  // in Orders. Orders will be used for hoisting in runHoistSpills.
 // Breadth-style worklist walk: children of already-ordered nodes that are in
 // WorkSet are appended, producing a top-down (parent-before-child) order.
 1256  unsigned idx = 0;
 1257  Orders.push_back(MDT.getBase().getNode(Root));
 1258  do {
 1259  MachineDomTreeNode *Node = Orders[idx++];
 1260  const std::vector<MachineDomTreeNode *> &Children = Node->getChildren();
 1261  unsigned NumChildren = Children.size();
 1262  for (unsigned i = 0; i != NumChildren; ++i) {
 1263  MachineDomTreeNode *Child = Children[i];
 1264  if (WorkSet.count(Child))
 1265  Orders.push_back(Child);
 1266  }
 1267  } while (idx != Orders.size());
 1268  assert(Orders.size() == WorkSet.size() &&
 1269  "Orders have different size with WorkSet");
 1270 
 1271 #ifndef NDEBUG
 1272  LLVM_DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
 // NOTE(review): the declaration of the reverse iterator RIt is elided in
 // this rendering.
 1274  for (; RIt != Orders.rend(); RIt++)
 1275  LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
 1276  LLVM_DEBUG(dbgs() << "\n");
 1277 #endif
 1278 }
1279 
 1280 /// Try to hoist spills according to BB hotness. The spills to removed will
 1281 /// be saved in \p SpillsToRm. The spills to be inserted will be saved in
 1282 /// \p SpillsToIns.
 1283 void HoistSpillHelper::runHoistSpills(
 1284  LiveInterval &OrigLI, VNInfo &OrigVNI,
 // NOTE(review): some parameter lines (Spills, SpillsToIns) and the local
 // declarations of Orders, SpillsToKeep, SpillBBToSpill, SpillsInSubTreeMap
 // and RIt are elided in this rendering — confirm against upstream source.
 1286  SmallVectorImpl<MachineInstr *> &SpillsToRm,
 1288  // Visit order of dominator tree nodes.
 1290  // SpillsToKeep contains all the nodes where spills are to be inserted
 1291  // during hoisting. If the spill to be inserted is an original spill
 1292  // (not a hoisted one), the value of the map entry is 0. If the spill
 1293  // is a hoisted spill, the value of the map entry is the VReg to be used
 1294  // as the source of the spill.
 1296  // Map from BB to the first spill inside of it.
 1298 
 1299  rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);
 1300 
 // The Root is the block defining the spilled value; it dominates all spills.
 1301  MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def);
 1302  getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
 1303  SpillBBToSpill);
 1304 
 1305  // SpillsInSubTreeMap keeps the map from a dom tree node to a pair of
 1306  // nodes set and the cost of all the spills inside those nodes.
 1307  // The nodes set are the locations where spills are to be inserted
 1308  // in the subtree of current node.
 1309  using NodesCostPair =
 1310  std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;
 1312 
 1313  // Iterate Orders set in reverse order, which will be a bottom-up order
 1314  // in the dominator tree. Once we visit a dom tree node, we know its
 1315  // children have already been visited and the spill locations in the
 1316  // subtrees of all the children have been determined.
 1318  for (; RIt != Orders.rend(); RIt++) {
 1319  MachineBasicBlock *Block = (*RIt)->getBlock();
 1320 
 1321  // If Block contains an original spill, simply continue.
 1322  if (SpillsToKeep.find(*RIt) != SpillsToKeep.end() && !SpillsToKeep[*RIt]) {
 1323  SpillsInSubTreeMap[*RIt].first.insert(*RIt);
 1324  // SpillsInSubTreeMap[*RIt].second contains the cost of spill.
 1325  SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block);
 1326  continue;
 1327  }
 1328 
 1329  // Collect spills in subtree of current node (*RIt) to
 1330  // SpillsInSubTreeMap[*RIt].first.
 1331  const std::vector<MachineDomTreeNode *> &Children = (*RIt)->getChildren();
 1332  unsigned NumChildren = Children.size();
 1333  for (unsigned i = 0; i != NumChildren; ++i) {
 1334  MachineDomTreeNode *Child = Children[i];
 1335  if (SpillsInSubTreeMap.find(Child) == SpillsInSubTreeMap.end())
 1336  continue;
 1337  // The stmt "SpillsInSubTree = SpillsInSubTreeMap[*RIt].first" below
 1338  // should be placed before getting the begin and end iterators of
 1339  // SpillsInSubTreeMap[Child].first, or else the iterators may be
 1340  // invalidated when SpillsInSubTreeMap[*RIt] is seen the first time
 1341  // and the map grows and then the original buckets in the map are moved.
 1342  SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
 1343  SpillsInSubTreeMap[*RIt].first;
 1344  BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
 1345  SubTreeCost += SpillsInSubTreeMap[Child].second;
 1346  auto BI = SpillsInSubTreeMap[Child].first.begin();
 1347  auto EI = SpillsInSubTreeMap[Child].first.end();
 1348  SpillsInSubTree.insert(BI, EI);
 1349  SpillsInSubTreeMap.erase(Child);
 1350  }
 1351 
 1352  SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
 1353  SpillsInSubTreeMap[*RIt].first;
 1354  BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
 1355  // No spills in subtree, simply continue.
 1356  if (SpillsInSubTree.empty())
 1357  continue;
 1358 
 1359  // Check whether Block is a possible candidate to insert spill.
 1360  unsigned LiveReg = 0;
 1361  if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
 1362  continue;
 1363 
 1364  // If there are multiple spills that could be merged, bias a little
 1365  // to hoist the spill.
 // MarginProb of 9/10 means hoisting is taken even when the hoisted cost is
 // up to ~11% higher than the current block's frequency-weighted cost.
 1366  BranchProbability MarginProb = (SpillsInSubTree.size() > 1)
 1367  ? BranchProbability(9, 10)
 1368  : BranchProbability(1, 1);
 1369  if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
 1370  // Hoist: Move spills to current Block.
 1371  for (const auto SpillBB : SpillsInSubTree) {
 1372  // When SpillBB is a BB contains original spill, insert the spill
 1373  // to SpillsToRm.
 1374  if (SpillsToKeep.find(SpillBB) != SpillsToKeep.end() &&
 1375  !SpillsToKeep[SpillBB]) {
 1376  MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
 1377  SpillsToRm.push_back(SpillToRm);
 1378  }
 1379  // SpillBB will not contain spill anymore, remove it from SpillsToKeep.
 1380  SpillsToKeep.erase(SpillBB);
 1381  }
 1382  // Current Block is the BB containing the new hoisted spill. Add it to
 1383  // SpillsToKeep. LiveReg is the source of the new spill.
 1384  SpillsToKeep[*RIt] = LiveReg;
 1385  LLVM_DEBUG({
 1386  dbgs() << "spills in BB: ";
 1387  for (const auto Rspill : SpillsInSubTree)
 1388  dbgs() << Rspill->getBlock()->getNumber() << " ";
 1389  dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
 1390  << "\n";
 1391  });
 // After hoisting, the subtree is represented by this single node/cost.
 1392  SpillsInSubTree.clear();
 1393  SpillsInSubTree.insert(*RIt);
 1394  SubTreeCost = MBFI.getBlockFreq(Block);
 1395  }
 1396  }
 1397  // For spills in SpillsToKeep with LiveReg set (i.e., not original spill),
 1398  // save them to SpillsToIns.
 1399  for (const auto Ent : SpillsToKeep) {
 1400  if (Ent.second)
 1401  SpillsToIns[Ent.first->getBlock()] = Ent.second;
 1402  }
 1403 }
1404 
 1405 /// For spills with equal values, remove redundant spills and hoist those left
 1406 /// to less hot spots.
 1407 ///
 1408 /// Spills with equal values will be collected into the same set in
 1409 /// MergeableSpills when spill is inserted. These equal spills are originated
 1410 /// from the same defining instruction and are dominated by the instruction.
 1411 /// Before hoisting all the equal spills, redundant spills inside in the same
 1412 /// BB are first marked to be deleted. Then starting from the spills left, walk
 1413 /// up on the dominator tree towards the Root node where the define instruction
 1414 /// is located, mark the dominated spills to be deleted along the way and
 1415 /// collect the BB nodes on the path from non-dominated spills to the define
 1416 /// instruction into a WorkSet. The nodes in WorkSet are the candidate places
 1417 /// where we are considering to hoist the spills. We iterate the WorkSet in
 1418 /// bottom-up order, and for each node, we will decide whether to hoist spills
 1419 /// inside its subtree to that node. In this way, we can get benefit locally
 1420 /// even if hoisting all the equal spills to one cold place is impossible.
 1421 void HoistSpillHelper::hoistAllSpills() {
 1422  SmallVector<unsigned, 4> NewVRegs;
 1423  LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);
 1424 
 // Group every virtual register with a def under its pre-split original, so
 // siblings of the same value can be found when checking candidate blocks.
 1425  for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
 1426  unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
 1427  unsigned Original = VRM.getPreSplitReg(Reg);
 1428  if (!MRI.def_empty(Reg))
 1429  Virt2SiblingsMap[Original].insert(Reg);
 1430  }
 1431 
 1432  // Each entry in MergeableSpills contains a spill set with equal values.
 1433  for (auto &Ent : MergeableSpills) {
 1434  int Slot = Ent.first.first;
 1435  LiveInterval &OrigLI = *StackSlotToOrigLI[Slot];
 1436  VNInfo *OrigVNI = Ent.first.second;
 1437  SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;
 1438  if (Ent.second.empty())
 1439  continue;
 1440 
 1441  LLVM_DEBUG({
 1442  dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
 1443  << "Equal spills in BB: ";
 1444  for (const auto spill : EqValSpills)
 1445  dbgs() << spill->getParent()->getNumber() << " ";
 1446  dbgs() << "\n";
 1447  });
 1448 
 1449  // SpillsToRm is the spill set to be removed from EqValSpills.
 // NOTE(review): the declarations of SpillsToRm and SpillsToIns are elided
 // in this rendering — confirm against the upstream source.
 1451  // SpillsToIns is the spill set to be newly inserted after hoisting.
 1453 
 1454  runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);
 1455 
 1456  LLVM_DEBUG({
 1457  dbgs() << "Finally inserted spills in BB: ";
 1458  for (const auto Ispill : SpillsToIns)
 1459  dbgs() << Ispill.first->getNumber() << " ";
 1460  dbgs() << "\nFinally removed spills in BB: ";
 1461  for (const auto Rspill : SpillsToRm)
 1462  dbgs() << Rspill->getParent()->getNumber() << " ";
 1463  dbgs() << "\n";
 1464  });
 1465 
 1466  // Stack live range update.
 1467  LiveInterval &StackIntvl = LSS.getInterval(Slot);
 1468  if (!SpillsToIns.empty() || !SpillsToRm.empty())
 1469  StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
 1470  StackIntvl.getValNumInfo(0));
 1471 
 1472  // Insert hoisted spills.
 1473  for (auto const Insert : SpillsToIns) {
 1474  MachineBasicBlock *BB = Insert.first;
 1475  unsigned LiveReg = Insert.second;
 1476  MachineBasicBlock::iterator MI = IPA.getLastInsertPointIter(OrigLI, *BB);
 1477  TII.storeRegToStackSlot(*BB, MI, LiveReg, false, Slot,
 1478  MRI.getRegClass(LiveReg), &TRI);
 // Register the freshly created store (the instruction just before MI)
 // with the slot index maps.
 1479  LIS.InsertMachineInstrRangeInMaps(std::prev(MI), MI);
 1480  ++NumSpills;
 1481  }
 1482 
 1483  // Remove redundant spills or change them to dead instructions.
 1484  NumSpills -= SpillsToRm.size();
 1485  for (auto const RMEnt : SpillsToRm) {
 // Turn the spill into a KILL and strip live implicit defs so that
 // eliminateDeadDefs below can delete it as dead.
 1486  RMEnt->setDesc(TII.get(TargetOpcode::KILL));
 1487  for (unsigned i = RMEnt->getNumOperands(); i; --i) {
 1488  MachineOperand &MO = RMEnt->getOperand(i - 1);
 1489  if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())
 1490  RMEnt->RemoveOperand(i - 1);
 1491  }
 1492  }
 1493  Edit.eliminateDeadDefs(SpillsToRm, None, AA);
 1494  }
 1495 }
1496 
1497 /// For VirtReg clone, the \p New register should have the same physreg or
1498 /// stackslot as the \p old register.
1499 void HoistSpillHelper::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
1500  if (VRM.hasPhys(Old))
1501  VRM.assignVirt2Phys(New, VRM.getPhys(Old));
1502  else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
1503  VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
1504  else
1505  llvm_unreachable("VReg should be assigned either physreg or stackslot");
1506 }
bool isPHIDef() const
Returns true if this value is defined by a PHI instruction (or was, PHI instructions may have been el...
Definition: LiveInterval.h:78
Safe Stack instrumentation pass
Definition: SafeStack.cpp:907
const unsigned reg
Definition: LiveInterval.h:667
SlotIndex getBaseIndex() const
Returns the base index for associated with this index.
Definition: SlotIndexes.h:242
SlotIndex def
The index of the defining instruction.
Definition: LiveInterval.h:61
This class represents lattice values for constants.
Definition: AllocatorList.h:24
static unsigned index2VirtReg(unsigned Index)
Convert a 0-based index to a virtual register number.
void MergeValueInAsValue(const LiveRange &RHS, const VNInfo *RHSValNo, VNInfo *LHSValNo)
MergeValueInAsValue - Merge all of the segments of a specific val# in RHS into this live range as the...
bool addRegisterDead(unsigned Reg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI defined a register without a use.
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Store the specified register of the given register class to the specified stack frame index...
VNInfoList::iterator vni_iterator
Definition: LiveInterval.h:217
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:383
vni_iterator vni_begin()
Definition: LiveInterval.h:220
LiveInterval - This class represents the liveness of a register, or stack slot.
Definition: LiveInterval.h:638
MIBundleOperands - Iterate over all operands in a bundle of machine instructions. ...
unsigned getReg() const
getReg - Returns the register number.
void setIsUndef(bool Val=true)
void eliminateDeadDefs(SmallVectorImpl< MachineInstr *> &Dead, ArrayRef< unsigned > RegsBeingSpilled=None, AliasAnalysis *AA=nullptr)
eliminateDeadDefs - Try to delete machine instructions that are now dead (allDefsAreDead returns true...
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
unsigned Reg
unsigned getSubReg() const
Spiller interface.
Definition: Spiller.h:24
MachineBlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate machine basic b...
This class implements a map that also provides access to all stored values in a deterministic order...
Definition: MapVector.h:38
STATISTIC(NumFunctions, "Total number of functions")
unsigned const TargetRegisterInfo * TRI
void setIsDead(bool Val=true)
VNInfo - Value Number Information.
Definition: LiveInterval.h:53
Determines the latest safe point in a block in which we can insert a split, spill or other instructio...
Definition: SplitKit.h:50
Callback methods for LiveRangeEdit owners.
Definition: LiveRangeEdit.h:49
VirtRegInfo analyzeVirtReg(unsigned Reg, SmallVectorImpl< std::pair< MachineInstr *, unsigned > > *Ops=nullptr)
analyzeVirtReg - Analyze how the current instruction or bundle uses a virtual register.
bool isUnused() const
Returns true if this value is unused.
Definition: LiveInterval.h:81
MachineInstrSpan provides an interface to get an iteration range containing the instruction it was in...
bool isEarlyClobber() const
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:221
Hexagon Hardware Loops
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Load the specified register of the given register class from the specified stack frame index...
MachineInstr * buildDbgValueForSpill(MachineBasicBlock &BB, MachineBasicBlock::iterator I, const MachineInstr &Orig, int FrameIndex)
Clone a DBG_VALUE whose value has been spilled to FrameIndex.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
const HexagonInstrInfo * TII
unsigned getNumOperands() const
Retuns the total number of operands.
Definition: MachineInstr.h:412
Printable printReg(unsigned Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
Result of a LiveRange query.
Definition: LiveInterval.h:90
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:409
PhysRegInfo analyzePhysReg(unsigned Reg, const TargetRegisterInfo *TRI)
analyzePhysReg - Analyze how the current instruction or bundle uses a physical register.
#define LLVM_DUMP_METHOD
Definition: Compiler.h:74
MachineBasicBlock::iterator end()
defusechain_iterator - This class provides iterator support for machine operands in the function that...
bool isValid() const
isValid - Returns true until all the operands have been visited.
bool isFullCopy() const
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def...
Definition: SlotIndexes.h:255
Base class for the actual dominator tree node.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:33
const std::vector< DomTreeNodeBase * > & getChildren() const
unsigned getReg() const
bool isKill() const
Return true if the live-in value is killed by this instruction.
Definition: LiveInterval.h:112
bool allDefsAreDead() const
Return true if all the defs of this instruction are dead.
unsigned getKillRegState(bool B)
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
Definition: LiveInterval.h:529
TargetInstrInfo - Interface to description of machine instruction set.
VNInfo * getVNInfoAt(SlotIndex Idx) const
getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
Definition: LiveInterval.h:409
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
Definition: MachineInstr.h:820
#define P(N)
static LLVM_DUMP_METHOD void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B, MachineBasicBlock::iterator E, LiveIntervals const &LIS, const char *const header, unsigned VReg=0)
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
unsigned const MachineRegisterInfo * MRI
Allocate memory in an ever growing pool, as if by bump-pointer.
Definition: Allocator.h:141
bool isBundled() const
Return true if this instruction part of a bundle.
Definition: MachineInstr.h:356
MachineInstrBuilder & UseMI
DomTreeNodeBase * getIDom() const
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
LLVM_NODISCARD bool empty() const
Definition: SmallPtrSet.h:92
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:371
bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx=nullptr) const
Return true if the use operand of the specified index is tied to a def operand.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:382
iterator_range< pred_iterator > predecessors()
bool isCopy() const
bool isImplicitDef() const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
size_t size() const
Definition: SmallVector.h:53
bool isDebugInstr() const
Definition: MachineInstr.h:999
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void setIsKill(bool Val=true)
virtual ~Spiller()=0
static bool isStackSlot(unsigned Reg)
isStackSlot - Sometimes it is useful the be able to store a non-negative frame index in a variable th...
size_type size() const
Definition: SmallPtrSet.h:93
unsigned id
The ID number of this value.
Definition: LiveInterval.h:58
Basic Register Allocator
LiveInterval & getParent() const
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:298
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:418
void setDesc(const MCInstrDesc &tid)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one...
static bool isFullUndefDef(const MachineInstr &Def)
Check if Def fully defines a VReg with an undefined value.
static bool isSameInstr(SlotIndex A, SlotIndex B)
isSameInstr - Return true if A and B refer to the same instruction.
Definition: SlotIndexes.h:198
bool isDebugValue() const
Definition: MachineInstr.h:997
MachineOperand class - Representation of each machine instruction operand.
MachineInstrBuilder MachineInstrBuilder & DefMI
static cl::opt< bool > DisableHoisting("disable-spill-hoist", cl::Hidden, cl::desc("Disable inline spill hoisting"))
unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
If the specified machine instruction is a direct store to a stack slot, return the virtual or physica...
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:381
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:133
VNInfo * getVNInfoBefore(SlotIndex Idx) const
getVNInfoBefore - Return the VNInfo that is live up to but not necessarilly including Idx...
Definition: LiveInterval.h:417
unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
TargetInstrInfo overrides.
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:254
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
Spiller * createInlineSpiller(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
Create and return a spiller that will insert spill code directly instead of deferring though VirtRegM...
unsigned getNumValNums() const
Definition: LiveInterval.h:301
Representation of each machine instruction.
Definition: MachineInstr.h:64
iterator begin() const
Definition: SmallPtrSet.h:397
void assign(const LiveRange &Other, BumpPtrAllocator &Allocator)
Copies values numbers and live segments from Other into this range.
Definition: LiveInterval.h:240
SlotIndex getPrevSlot() const
Returns the previous slot in the index list.
Definition: SlotIndexes.h:290
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:56
bool canFoldAsLoad(QueryType Type=IgnoreBundle) const
Return true for instructions that can be folded as memory operands in other instructions.
Definition: MachineInstr.h:753
void setReg(unsigned Reg)
Change the register this operand corresponds to.
#define I(x, y, z)
Definition: MD5.cpp:58
static unsigned isFullCopyOf(const MachineInstr &MI, unsigned Reg)
isFullCopyOf - If MI is a COPY to or from Reg, return the other register, otherwise return 0...
iterator end() const
Definition: SmallPtrSet.h:402
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Remat - Information needed to rematerialize at a specific location.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
bool isSpillable() const
isSpillable - Can this interval be spilled?
Definition: LiveInterval.h:767
iterator SkipPHIsLabelsAndDebug(iterator I)
Return the first instruction in MBB after I that is not a PHI, label or debug.
IRTranslator LLVM IR MI
void RemoveOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with...
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object...
#define LLVM_DEBUG(X)
Definition: Debug.h:123
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:414
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:84
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
vni_iterator vni_end()
Definition: LiveInterval.h:221
bool isImplicit() const
MachineBasicBlock::iterator begin()
bool is_contained(R &&Range, const E &Element)
Wrapper function around std::find to detect if an element exists in a container.
Definition: STLExtras.h:1245