60 unsigned FlatScratchInitReg
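// FlatScratchInitReg holds the preloaded FLAT_SCRATCH_INIT user-SGPR pair; its low and high halves are split out below.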
67 unsigned FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);
68 unsigned FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);
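// Flat-scratch-as-pointer path (presumably guarded by ST.flatScratchIsPointer()): fold the per-wave scratch
// offset into the 64-bit flat scratch base via add / add-with-carry into FLAT_SCR_LO / FLAT_SCR_HI.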
74 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), AMDGPU::FLAT_SCR_LO)
75 .addReg(FlatScrInitLo)
76 .addReg(ScratchWaveOffsetReg);
77 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), AMDGPU::FLAT_SCR_HI)
78 .addReg(FlatScrInitHi)
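// Legacy path: FLAT_SCR_LO takes the scratch size, the wave offset is added to the base,
// and the result is shifted into 256-byte units for FLAT_SCR_HI.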
85 BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), AMDGPU::FLAT_SCR_LO)
90 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
91 .addReg(FlatScrInitLo)
92 .addReg(ScratchWaveOffsetReg);
95 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_LSHR_B32), AMDGPU::FLAT_SCR_HI)
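// getReservedPrivateSegmentBufferReg: if the default reserved SGPR128 for the scratch resource descriptor is
// actually used, try to shift it down to the first unused, allocatable SGPR128 past the preloaded user SGPRs
// so fewer registers stay reserved.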
100 unsigned SIFrameLowering::getReservedPrivateSegmentBufferReg(
110 if (ScratchRsrcReg == AMDGPU::NoRegister ||
112 return AMDGPU::NoRegister;
116 return ScratchRsrcReg;
131 AllSGPR128s = AllSGPR128s.slice(std::min(static_cast<unsigned>(AllSGPR128s.size()), NumPreloaded));
145 return ScratchRsrcReg;
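// getReservedPrivateSegmentWaveByteOffsetReg: the analogous search for the 32-bit scratch wave byte offset
// register (skipping the registers already reserved at the end of the SGPR file), returned together with the
// stack pointer register.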
150 std::pair<unsigned, unsigned>
151 SIFrameLowering::getReservedPrivateSegmentWaveByteOffsetReg(
161 if (ScratchWaveOffsetReg == AMDGPU::NoRegister ||
164 return std::make_pair(AMDGPU::NoRegister, AMDGPU::NoRegister);
169 return std::make_pair(ScratchWaveOffsetReg, SPReg);
174 if (NumPreloaded > AllSGPRs.size())
175 return std::make_pair(ScratchWaveOffsetReg, SPReg);
177 AllSGPRs = AllSGPRs.slice(NumPreloaded);
193 unsigned ReservedRegCount = 13;
195 if (AllSGPRs.size() < ReservedRegCount)
196 return std::make_pair(ScratchWaveOffsetReg, SPReg);
198 bool HandledScratchWaveOffsetReg =
205 if (!HandledScratchWaveOffsetReg) {
206 HandledScratchWaveOffsetReg = true;
210 ScratchWaveOffsetReg = Reg;
216 return std::make_pair(ScratchWaveOffsetReg, SPReg);
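// emitEntryFunctionPrologue: set up scratch state (flat scratch, stack pointer, scratch resource descriptor
// and wave offset) for kernels and other entry points.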
225 emitDebuggerPrologue(MF, MBB);
227 assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");
251 if (MFI->hasFlatScratchInit())
252 emitFlatScratchInit(ST, MF, MBB);
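// If a stack pointer other than the default SP_REG was assigned, initialize it from the scratch wave offset,
// adding the (wave-scaled) frame size when it is nonzero.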
254 unsigned SPReg = MFI->getStackPtrOffsetReg();
255 if (SPReg != AMDGPU::SP_REG) {
262 if (StackSize == 0) {
263 BuildMI(MBB, MBB.begin(), DL, TII->get(AMDGPU::COPY), SPReg)
264 .addReg(MFI->getScratchWaveOffsetReg());
266 BuildMI(MBB, MBB.begin(), DL, TII->get(AMDGPU::S_ADD_U32), SPReg)
267 .addReg(MFI->getScratchWaveOffsetReg())
272 unsigned ScratchRsrcReg
273 = getReservedPrivateSegmentBufferReg(ST, TII, TRI, MFI, MF);
275 unsigned ScratchWaveOffsetReg;
276 std::tie(ScratchWaveOffsetReg, SPReg)
277 = getReservedPrivateSegmentWaveByteOffsetReg(ST, TII, TRI, MFI, MF);
282 if (ScratchWaveOffsetReg == AMDGPU::NoRegister) {
283 assert(ScratchRsrcReg == AMDGPU::NoRegister);
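// Look up the preloaded scratch wave offset and private segment buffer inputs and record them as live-in
// so the copies emitted below remain valid.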
288 unsigned PreloadedScratchWaveOffsetReg = MFI->getPreloadedReg(
291 unsigned PreloadedPrivateBufferReg = AMDGPU::NoRegister;
293 PreloadedPrivateBufferReg = MFI->getPreloadedReg(
297 bool OffsetRegUsed = MRI.isPhysRegUsed(ScratchWaveOffsetReg);
298 bool ResourceRegUsed = ScratchRsrcReg != AMDGPU::NoRegister &&
304 assert(PreloadedScratchWaveOffsetReg != AMDGPU::NoRegister &&
305 "scratch wave offset input is required");
306 MRI.addLiveIn(PreloadedScratchWaveOffsetReg);
307 MBB.addLiveIn(PreloadedScratchWaveOffsetReg);
310 if (ResourceRegUsed && PreloadedPrivateBufferReg != AMDGPU::NoRegister) {
312 MRI.addLiveIn(PreloadedPrivateBufferReg);
313 MBB.addLiveIn(PreloadedPrivateBufferReg);
318 if (&OtherBB == &MBB)
322 OtherBB.addLiveIn(ScratchWaveOffsetReg);
325 OtherBB.addLiveIn(ScratchRsrcReg);
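// If the preloaded buffer register overlaps the chosen wave offset register, the buffer must be copied into
// ScratchRsrcReg first so the wave offset copy does not clobber it.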
334 bool CopyBuffer = ResourceRegUsed &&
335 PreloadedPrivateBufferReg != AMDGPU::NoRegister &&
337 ScratchRsrcReg != PreloadedPrivateBufferReg;
342 bool CopyBufferFirst = TRI->isSubRegisterEq(PreloadedPrivateBufferReg,
343 ScratchWaveOffsetReg);
344 if (CopyBuffer && CopyBufferFirst) {
345 BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
350 PreloadedScratchWaveOffsetReg != ScratchWaveOffsetReg) {
351 BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg)
352 .addReg(PreloadedScratchWaveOffsetReg,
356 if (CopyBuffer && !CopyBufferFirst) {
357 BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
362 emitEntryFunctionScratchSetup(ST, MF, MBB, MFI, I,
363 PreloadedPrivateBufferReg, ScratchRsrcReg);
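// emitEntryFunctionScratchSetup materializes the scratch resource descriptor when it cannot simply be copied
// from a preloaded input: it appears to either load it with S_LOAD_DWORDX4 relative to the GIT pointer
// (obtained via S_GETPC_B64), or assemble it dword-by-dword for Mesa GFX shaders / when no buffer was preloaded.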
367 void SIFrameLowering::emitEntryFunctionScratchSetup(const GCNSubtarget &ST,
370 unsigned ScratchRsrcReg) const {
380 unsigned RsrcLo = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
381 unsigned RsrcHi = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);
382 unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);
384 const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);
387 BuildMI(MBB, I, DL, SMovB32, RsrcHi)
391 const MCInstrDesc &GetPC64 = TII->get(AMDGPU::S_GETPC_B64);
392 BuildMI(MBB, I, DL, GetPC64, Rsrc01);
394 auto GitPtrLo = AMDGPU::SGPR0;
401 GitPtrLo = AMDGPU::SGPR8;
409 BuildMI(MBB, I, DL, SMovB32, RsrcLo)
419 const MCInstrDesc &LoadDwordX4 = TII->get(AMDGPU::S_LOAD_DWORDX4_IMM);
426 BuildMI(MBB, I, DL, LoadDwordX4, ScratchRsrcReg)
435 || (PreloadedPrivateBufferReg == AMDGPU::NoRegister)) {
437 const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);
439 unsigned Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
440 unsigned Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);
446 unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);
449 const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64);
451 BuildMI(MBB, I, DL, Mov64, Rsrc01)
455 const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);
466 BuildMI(MBB, I, DL, LoadDwordX2, Rsrc01)
474 unsigned Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
475 unsigned Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);
477 BuildMI(MBB, I, DL, SMovB32, Rsrc0)
481 BuildMI(MBB, I, DL, SMovB32, Rsrc1)
487 BuildMI(MBB, I, DL, SMovB32, Rsrc2)
488 .addImm(Rsrc23 & 0xffffffff)
491 BuildMI(MBB, I, DL, SMovB32, Rsrc3)
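// findScratchNonCalleeSaveRegister: pick a free SReg_32_XM0 that is neither live-in nor callee-saved,
// usable as a temporary during stack realignment in the prologue.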
517 const MCPhysReg *CSRegs = TRI.getCalleeSavedRegs(MF);
518 for (unsigned i = 0; CSRegs[i]; ++i)
519 LiveRegs.addReg(CSRegs[i]);
523 for (unsigned Reg : AMDGPU::SReg_32_XM0RegClass) {
528 return AMDGPU::NoRegister;
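// emitPrologue for non-entry functions: establish the frame pointer (realigning it if required), advance the
// stack pointer by the rounded frame size, and save the VGPRs reserved for SGPR spilling.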
552 bool NeedFP = hasFP(MF);
555 const bool NeedsRealignment = TRI.needsStackRealignment(MF);
557 if (NeedsRealignment) {
561 RoundedSize += Alignment;
564 assert(ScratchSPReg != AMDGPU::NoRegister);
568 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), ScratchSPReg)
572 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_AND_B32), FramePtrReg)
582 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg)
587 if (RoundedSize != 0 && hasSP(MF)) {
588 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), StackPtrReg)
596 if (!Reg.FI.hasValue())
598 TII->storeRegToStackSlot(MBB, MBBI, Reg.VGPR, true,
599 Reg.FI.getValue(), &AMDGPU::VGPR_32RegClass,
600 &TII->getRegisterInfo());
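// emitEpilogue: reload the SGPR-spill VGPRs and undo the stack pointer adjustment made in the prologue.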
616 if (!Reg.FI.hasValue())
618 TII->loadRegFromStackSlot(MBB, MBBI, Reg.VGPR,
619 Reg.FI.getValue(), &AMDGPU::VGPR_32RegClass,
620 &TII->getRegisterInfo());
624 if (StackPtrReg == AMDGPU::NoRegister)
635 if (NumBytes != 0 && hasSP(MF)) {
639 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_SUB_U32), StackPtrReg)
656 unsigned &FrameReg) const {
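// processFunctionBeforeFrameFinalized: rewrite SGPR spill pseudos into the VGPR lanes allocated for them;
// if any spill could not be handled that way, reserve an emergency scavenging slot for the register scavenger.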
675 bool AllSGPRSpilledToVGPRs = false;
678 AllSGPRSpilledToVGPRs = true;
690 for (auto I = MBB.begin(), E = MBB.end(); I != E; I = Next) {
694 if (TII->isSGPRSpill(MI)) {
695 int FI = TII->getNamedOperand(MI, AMDGPU::OpName::addr)->getIndex();
700 assert(Spilled && "failed to spill SGPR to VGPR when allocated");
702 AllSGPRSpilledToVGPRs = false;
715 assert(RS && "RegScavenger required if spilling");
731 TRI.getSpillSize(AMDGPU::SGPR_32RegClass), 0, false);
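// eliminateCallFramePseudoInstr: expand call frame setup/destroy pseudos into S_ADD_U32 / S_SUB_U32 on the
// stack pointer (with the amount rounded up to the stack alignment) when no reserved call frame is used.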
749 int64_t Amount = I->getOperand(0).getImm();
755 const DebugLoc &DL = I->getDebugLoc();
756 unsigned Opc = I->getOpcode();
757 bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
758 uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;
764 Amount = alignTo(Amount, Align);
769 unsigned Op = IsDestroy ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
773 } else if (CalleePopAmount != 0) {
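// emitDebuggerPrologue: copy the work-group ID SGPRs into VGPRs and store them, along with the work-item ID
// VGPRs, to the stack objects reserved for the debugger.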
791 for (unsigned i = 0; i < 3; ++i) {
799 unsigned WorkGroupIDVGPR =
801 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), WorkGroupIDVGPR)
802 .addReg(WorkGroupIDSGPR);
806 TII->storeRegToStackSlot(MBB, I, WorkGroupIDVGPR, false,
807 WorkGroupIDObjectIdx, &AMDGPU::VGPR_32RegClass, TRI);
816 TII->storeRegToStackSlot(MBB, I, WorkItemIDVGPR, false,
817 WorkItemIDObjectIdx, &AMDGPU::VGPR_32RegClass, TRI);