// DispatchStage.cpp - models the dispatch component of an instruction pipeline.

#include "llvm/MCA/Stages/DispatchStage.h"
#include "llvm/MCA/HWEventListener.h"
#include "llvm/MCA/HardwareUnits/Scheduler.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "llvm-mca"

namespace llvm {
namespace mca {

void DispatchStage::notifyInstructionDispatched(const InstRef &IR,
                                                ArrayRef<unsigned> UsedRegs,
                                                unsigned UOps) const {
  LLVM_DEBUG(dbgs() << "[E] Instruction Dispatched: #" << IR << '\n');
  notifyEvent<HWInstructionEvent>(
      HWInstructionDispatchedEvent(IR, UsedRegs, UOps));
}

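// Returns false, and notifies a RegisterFileStall event, if the register
// file(s) cannot allocate physical registers for all register writes of IR.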
bool DispatchStage::checkPRF(const InstRef &IR) const {
  SmallVector<unsigned, 4> RegDefs;
  for (const WriteState &RegDef : IR.getInstruction()->getDefs())
    RegDefs.emplace_back(RegDef.getRegisterID());

  const unsigned RegisterMask = PRF.isAvailable(RegDefs);
  // A mask with all zeroes means: register files are available.
  if (RegisterMask) {
    notifyEvent<HWStallEvent>(
        HWStallEvent(HWStallEvent::RegisterFileStall, IR));
    return false;
  }

  return true;
}

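// Returns false, and notifies a RetireControlUnitStall event, if the retire
// control unit cannot reserve slots for the micro-opcodes of IR.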
bool DispatchStage::checkRCU(const InstRef &IR) const {
  const unsigned NumMicroOps = IR.getInstruction()->getDesc().NumMicroOps;
  if (RCU.isAvailable(NumMicroOps))
    return true;
  notifyEvent<HWStallEvent>(
      HWStallEvent(HWStallEvent::RetireControlUnitStall, IR));
  return false;
}

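// An instruction can only be dispatched if the retire control unit, the
// register file(s), and the next pipeline stage can all accept it.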
bool DispatchStage::canDispatch(const InstRef &IR) const {
  return checkRCU(IR) && checkPRF(IR) && checkNextStage(IR);
}

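// Updates the read-after-write dependencies of read RS against all the
// pending writes to the same register.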
void DispatchStage::updateRAWDependencies(ReadState &RS,
                                          const MCSubtargetInfo &STI) {
  SmallVector<WriteRef, 4> DependentWrites;

  // Collect all the dependent writes, and update RS internal state.
  PRF.addRegisterRead(RS, DependentWrites);

  // We know that this read depends on all the writes in DependentWrites.
  // For each write, check if we have ReadAdvance information, and use it to
  // figure out in how many cycles this read becomes available.
  const ReadDescriptor &RD = RS.getDescriptor();
  const MCSchedModel &SM = STI.getSchedModel();
  const MCSchedClassDesc *SC = SM.getSchedClassDesc(RD.SchedClassID);
  for (WriteRef &WR : DependentWrites) {
    WriteState &WS = *WR.getWriteState();
    unsigned WriteResID = WS.getWriteResourceID();
    int ReadAdvance = STI.getReadAdvanceCycles(SC, RD.UseIndex, WriteResID);
    WS.addUser(&RS, ReadAdvance);
  }
}

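// Dispatches IR: consumes dispatch slots, attempts move elimination, updates
// RAW dependencies, allocates physical registers for the writes, reserves a
// retire control unit slot, and finally moves IR to the next stage.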
Error DispatchStage::dispatch(InstRef IR) {
  assert(!CarryOver && "Cannot dispatch another instruction!");
  Instruction &IS = *IR.getInstruction();
  const InstrDesc &Desc = IS.getDesc();
  const unsigned NumMicroOps = Desc.NumMicroOps;
  if (NumMicroOps > DispatchWidth) {
    assert(AvailableEntries == DispatchWidth);
    AvailableEntries = 0;
    CarryOver = NumMicroOps - DispatchWidth;
    CarriedOver = IR;
  } else {
    assert(AvailableEntries >= NumMicroOps);
    AvailableEntries -= NumMicroOps;
  }

  // Check if this instruction ends the current dispatch group.
  if (Desc.EndGroup)
    AvailableEntries = 0;

  // Check if this is an optimizable reg-reg move.
  bool IsEliminated = false;
  if (IS.isOptimizableMove()) {
    assert(IS.getDefs().size() == 1 && "Expected a single input!");
    assert(IS.getUses().size() == 1 && "Expected a single output!");
    IsEliminated = PRF.tryEliminateMove(IS.getDefs()[0], IS.getUses()[0]);
  }

  // Don't update RAW dependencies for dependency-breaking instructions, or
  // for moves that have been eliminated at register renaming stage.
  if (!IsEliminated) {
    for (ReadState &RS : IS.getUses())
      updateRAWDependencies(RS, STI);
  }

  // Allocate physical registers for the register writes, and report how many
  // registers were consumed from each register file.
  SmallVector<unsigned, 4> RegisterFiles(PRF.getNumRegisterFiles());
  for (WriteState &WS : IS.getDefs())
    PRF.addRegisterWrite(WriteRef(IR.getSourceIndex(), &WS), RegisterFiles);

  // Reserve slots in the retire control unit, then notify listeners and move
  // IR to the next pipeline stage.
  IS.dispatch(RCU.reserveSlot(IR, NumMicroOps));
  notifyInstructionDispatched(IR, RegisterFiles,
                              std::min(DispatchWidth, NumMicroOps));
  return moveToTheNextStage(IR);
}

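// Called once at the start of each cycle. Replenishes the pool of available
// dispatch entries, accounting for micro-opcodes carried over from a previous
// cycle.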
Error DispatchStage::cycleStart() {
  if (!CarryOver) {
    AvailableEntries = DispatchWidth;
    return ErrorSuccess();
  }

  AvailableEntries = CarryOver >= DispatchWidth ? 0 : DispatchWidth - CarryOver;
  unsigned DispatchedOpcodes = DispatchWidth - AvailableEntries;
  CarryOver -= DispatchedOpcodes;
  assert(CarriedOver && "Invalid dispatched instruction");

  SmallVector<unsigned, 8> RegisterFiles(PRF.getNumRegisterFiles(), 0U);
  notifyInstructionDispatched(CarriedOver, RegisterFiles, DispatchedOpcodes);
  if (!CarryOver)
    CarriedOver = InstRef();
  return ErrorSuccess();
}

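// Returns true if this stage can execute IR during this cycle.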
bool DispatchStage::isAvailable(const InstRef &IR) const {
  const InstrDesc &Desc = IR.getInstruction()->getDesc();
  unsigned Required = std::min(Desc.NumMicroOps, DispatchWidth);
  if (Required > AvailableEntries)
    return false;

  if (Desc.BeginGroup && AvailableEntries != DispatchWidth)
    return false;

  // The dispatch logic doesn't internally buffer instructions. It only
  // accepts instructions that can be moved to the next stage this cycle.
  return canDispatch(IR);
}

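// The primary action that this stage performs on instruction IR.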
Error DispatchStage::execute(InstRef &IR) {
  assert(canDispatch(IR) && "Cannot dispatch another instruction!");
  return dispatch(IR);
}

} // namespace mca
} // namespace llvm