LLVM  8.0.1
InterferenceCache.cpp
Go to the documentation of this file.
1 //===- InterferenceCache.cpp - Caching per-block interference -------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // InterferenceCache remembers per-block interference in LiveIntervalUnions.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "InterferenceCache.h"
15 #include "llvm/ADT/ArrayRef.h"
24 #include "llvm/MC/MCRegisterInfo.h"
26 #include <cassert>
27 #include <cstdint>
28 #include <cstdlib>
29 #include <tuple>
30 
31 using namespace llvm;
32 
33 #define DEBUG_TYPE "regalloc"
34 
// Static member used for null interference cursors.
// NOTE(review): presumably this shared empty BlockInterference lets a Cursor
// with no cache entry answer queries without a null check — confirm against
// InterferenceCache.h.
const InterferenceCache::BlockInterference
    InterferenceCache::Cursor::NoInterference;
38 
39 // Initializes PhysRegEntries (instead of a SmallVector, PhysRegEntries is a
40 // buffer of size NumPhysRegs to speed up alloc/clear for targets with large
41 // reg files). Calloced memory is used for good form, and quites tools like
42 // Valgrind too, but zero initialized memory is not required by the algorithm:
43 // this is because PhysRegEntries works like a SparseSet and its entries are
44 // only valid when there is a corresponding CacheEntries assignment. There is
45 // also support for when pass managers are reused for targets with different
46 // numbers of PhysRegs: in this case PhysRegEntries is freed and reinitialized.
48  if (PhysRegEntriesCount == TRI->getNumRegs()) return;
49  free(PhysRegEntries);
50  PhysRegEntriesCount = TRI->getNumRegs();
51  PhysRegEntries = static_cast<unsigned char*>(
52  safe_calloc(PhysRegEntriesCount, sizeof(unsigned char)));
53 }
54 
56  LiveIntervalUnion *liuarray,
57  SlotIndexes *indexes,
58  LiveIntervals *lis,
59  const TargetRegisterInfo *tri) {
60  MF = mf;
61  LIUArray = liuarray;
62  TRI = tri;
64  for (unsigned i = 0; i != CacheEntries; ++i)
65  Entries[i].clear(mf, indexes, lis);
66 }
67 
68 InterferenceCache::Entry *InterferenceCache::get(unsigned PhysReg) {
69  unsigned E = PhysRegEntries[PhysReg];
70  if (E < CacheEntries && Entries[E].getPhysReg() == PhysReg) {
71  if (!Entries[E].valid(LIUArray, TRI))
72  Entries[E].revalidate(LIUArray, TRI);
73  return &Entries[E];
74  }
75  // No valid entry exists, pick the next round-robin entry.
76  E = RoundRobin;
77  if (++RoundRobin == CacheEntries)
78  RoundRobin = 0;
79  for (unsigned i = 0; i != CacheEntries; ++i) {
80  // Skip entries that are in use.
81  if (Entries[E].hasRefs()) {
82  if (++E == CacheEntries)
83  E = 0;
84  continue;
85  }
86  Entries[E].reset(PhysReg, LIUArray, TRI, MF);
87  PhysRegEntries[PhysReg] = E;
88  return &Entries[E];
89  }
90  llvm_unreachable("Ran out of interference cache entries.");
91 }
92 
93 /// revalidate - LIU contents have changed, update tags.
94 void InterferenceCache::Entry::revalidate(LiveIntervalUnion *LIUArray,
95  const TargetRegisterInfo *TRI) {
96  // Invalidate all block entries.
97  ++Tag;
98  // Invalidate all iterators.
99  PrevPos = SlotIndex();
100  unsigned i = 0;
101  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units, ++i)
102  RegUnits[i].VirtTag = LIUArray[*Units].getTag();
103 }
104 
105 void InterferenceCache::Entry::reset(unsigned physReg,
106  LiveIntervalUnion *LIUArray,
107  const TargetRegisterInfo *TRI,
108  const MachineFunction *MF) {
109  assert(!hasRefs() && "Cannot reset cache entry with references");
110  // LIU's changed, invalidate cache.
111  ++Tag;
112  PhysReg = physReg;
113  Blocks.resize(MF->getNumBlockIDs());
114 
115  // Reset iterators.
116  PrevPos = SlotIndex();
117  RegUnits.clear();
118  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
119  RegUnits.push_back(LIUArray[*Units]);
120  RegUnits.back().Fixed = &LIS->getRegUnit(*Units);
121  }
122 }
123 
124 bool InterferenceCache::Entry::valid(LiveIntervalUnion *LIUArray,
125  const TargetRegisterInfo *TRI) {
126  unsigned i = 0, e = RegUnits.size();
127  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units, ++i) {
128  if (i == e)
129  return false;
130  if (LIUArray[*Units].changedSince(RegUnits[i].VirtTag))
131  return false;
132  }
133  return i == e;
134 }
135 
136 void InterferenceCache::Entry::update(unsigned MBBNum) {
137  SlotIndex Start, Stop;
138  std::tie(Start, Stop) = Indexes->getMBBRange(MBBNum);
139 
140  // Use advanceTo only when possible.
141  if (PrevPos != Start) {
142  if (!PrevPos.isValid() || Start < PrevPos) {
143  for (unsigned i = 0, e = RegUnits.size(); i != e; ++i) {
144  RegUnitInfo &RUI = RegUnits[i];
145  RUI.VirtI.find(Start);
146  RUI.FixedI = RUI.Fixed->find(Start);
147  }
148  } else {
149  for (unsigned i = 0, e = RegUnits.size(); i != e; ++i) {
150  RegUnitInfo &RUI = RegUnits[i];
151  RUI.VirtI.advanceTo(Start);
152  if (RUI.FixedI != RUI.Fixed->end())
153  RUI.FixedI = RUI.Fixed->advanceTo(RUI.FixedI, Start);
154  }
155  }
156  PrevPos = Start;
157  }
158 
160  MF->getBlockNumbered(MBBNum)->getIterator();
161  BlockInterference *BI = &Blocks[MBBNum];
162  ArrayRef<SlotIndex> RegMaskSlots;
163  ArrayRef<const uint32_t*> RegMaskBits;
164  while (true) {
165  BI->Tag = Tag;
166  BI->First = BI->Last = SlotIndex();
167 
168  // Check for first interference from virtregs.
169  for (unsigned i = 0, e = RegUnits.size(); i != e; ++i) {
170  LiveIntervalUnion::SegmentIter &I = RegUnits[i].VirtI;
171  if (!I.valid())
172  continue;
173  SlotIndex StartI = I.start();
174  if (StartI >= Stop)
175  continue;
176  if (!BI->First.isValid() || StartI < BI->First)
177  BI->First = StartI;
178  }
179 
180  // Same thing for fixed interference.
181  for (unsigned i = 0, e = RegUnits.size(); i != e; ++i) {
182  LiveInterval::const_iterator I = RegUnits[i].FixedI;
183  LiveInterval::const_iterator E = RegUnits[i].Fixed->end();
184  if (I == E)
185  continue;
186  SlotIndex StartI = I->start;
187  if (StartI >= Stop)
188  continue;
189  if (!BI->First.isValid() || StartI < BI->First)
190  BI->First = StartI;
191  }
192 
193  // Also check for register mask interference.
194  RegMaskSlots = LIS->getRegMaskSlotsInBlock(MBBNum);
195  RegMaskBits = LIS->getRegMaskBitsInBlock(MBBNum);
196  SlotIndex Limit = BI->First.isValid() ? BI->First : Stop;
197  for (unsigned i = 0, e = RegMaskSlots.size();
198  i != e && RegMaskSlots[i] < Limit; ++i)
199  if (MachineOperand::clobbersPhysReg(RegMaskBits[i], PhysReg)) {
200  // Register mask i clobbers PhysReg before the LIU interference.
201  BI->First = RegMaskSlots[i];
202  break;
203  }
204 
205  PrevPos = Stop;
206  if (BI->First.isValid())
207  break;
208 
209  // No interference in this block? Go ahead and precompute the next block.
210  if (++MFI == MF->end())
211  return;
212  MBBNum = MFI->getNumber();
213  BI = &Blocks[MBBNum];
214  if (BI->Tag == Tag)
215  return;
216  std::tie(Start, Stop) = Indexes->getMBBRange(MBBNum);
217  }
218 
219  // Check for last interference in block.
220  for (unsigned i = 0, e = RegUnits.size(); i != e; ++i) {
221  LiveIntervalUnion::SegmentIter &I = RegUnits[i].VirtI;
222  if (!I.valid() || I.start() >= Stop)
223  continue;
224  I.advanceTo(Stop);
225  bool Backup = !I.valid() || I.start() >= Stop;
226  if (Backup)
227  --I;
228  SlotIndex StopI = I.stop();
229  if (!BI->Last.isValid() || StopI > BI->Last)
230  BI->Last = StopI;
231  if (Backup)
232  ++I;
233  }
234 
235  // Fixed interference.
236  for (unsigned i = 0, e = RegUnits.size(); i != e; ++i) {
237  LiveInterval::iterator &I = RegUnits[i].FixedI;
238  LiveRange *LR = RegUnits[i].Fixed;
239  if (I == LR->end() || I->start >= Stop)
240  continue;
241  I = LR->advanceTo(I, Stop);
242  bool Backup = I == LR->end() || I->start >= Stop;
243  if (Backup)
244  --I;
245  SlotIndex StopI = I->end;
246  if (!BI->Last.isValid() || StopI > BI->Last)
247  BI->Last = StopI;
248  if (Backup)
249  ++I;
250  }
251 
252  // Also check for register mask interference.
253  SlotIndex Limit = BI->Last.isValid() ? BI->Last : Start;
254  for (unsigned i = RegMaskSlots.size();
255  i && RegMaskSlots[i-1].getDeadSlot() > Limit; --i)
256  if (MachineOperand::clobbersPhysReg(RegMaskBits[i-1], PhysReg)) {
257  // Register mask i-1 clobbers PhysReg after the LIU interference.
258  // Model the regmask clobber as a dead def.
259  BI->Last = RegMaskSlots[i-1].getDeadSlot();
260  break;
261  }
262 }
This class represents lattice values for constants.
Definition: AllocatorList.h:24
unsigned getNumBlockIDs() const
getNumBlockIDs - Return the number of MBB IDs allocated.
Segments::iterator iterator
Definition: LiveInterval.h:208
iterator advanceTo(iterator I, SlotIndex Pos)
advanceTo - Advance the specified iterator to point to the Segment containing the specified position...
Definition: LiveInterval.h:259
bool isValid() const
Returns true if this is a valid index.
Definition: SlotIndexes.h:152
bool valid() const
valid - Return true if the current position is valid, false for end().
Definition: IntervalMap.h:1359
This class represents the liveness of a register, stack slot, etc.
Definition: LiveInterval.h:157
void init(MachineFunction *mf, LiveIntervalUnion *liuarray, SlotIndexes *indexes, LiveIntervals *lis, const TargetRegisterInfo *tri)
init - Prepare cache for a new function.
iterator end()
Definition: LiveInterval.h:212
void advanceTo(KeyT x)
advanceTo - Move to the first interval with stop >= x, or end().
Definition: IntervalMap.h:1443
SlotIndexes pass.
Definition: SlotIndexes.h:331
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:33
unsigned getNumRegs() const
Return the number of registers this target has (useful for sizing arrays holding per register informa...
MachineBasicBlock * getBlockNumbered(unsigned N) const
getBlockNumbered - MachineBasicBlocks are automatically numbered when they are inserted into the mach...
* if(!EatIfPresent(lltok::kw_thread_local)) return false
ParseOptionalThreadLocal := /*empty.
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:149
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
const KeyT & start() const
start - Return the beginning of the current interval.
Definition: IntervalMap.h:1365
Union of live intervals that are strong candidates for coalescing into a single register (either phys...
self_iterator getIterator()
Definition: ilist_node.h:82
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Iterator for intrusive lists based on ilist_node.
Segments::const_iterator const_iterator
Definition: LiveInterval.h:209
static Optional< unsigned > getTag(const TargetRegisterInfo *TRI, const MachineInstr &MI, const LoadInfo &LI)
static bool clobbersPhysReg(const uint32_t *RegMask, unsigned PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
bool isValid() const
isValid - returns true if this iterator is not yet at the end.
static void clear(coro::Shape &Shape)
Definition: Coroutines.cpp:212
const KeyT & stop() const
stop - Return the end of the current interval.
Definition: IntervalMap.h:1368
#define I(x, y, z)
Definition: MD5.cpp:58
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
LLVM_ATTRIBUTE_RETURNS_NONNULL void * safe_calloc(size_t Count, size_t Sz)
Definition: MemAlloc.h:33
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:84