LLVM  8.0.1
TargetInstrInfo.h
Go to the documentation of this file.
1 //===- llvm/CodeGen/TargetInstrInfo.h - Instruction Info --------*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file describes the target machine instruction set to the code generator.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef LLVM_TARGET_TARGETINSTRINFO_H
15 #define LLVM_TARGET_TARGETINSTRINFO_H
16 
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/DenseMap.h"
19 #include "llvm/ADT/DenseMapInfo.h"
20 #include "llvm/ADT/None.h"
30 #include "llvm/MC/MCInstrInfo.h"
33 #include <cassert>
34 #include <cstddef>
35 #include <cstdint>
36 #include <utility>
37 #include <vector>
38 
39 namespace llvm {
40 
41 class DFAPacketizer;
42 class InstrItineraryData;
43 class LiveIntervals;
44 class LiveVariables;
45 class MachineMemOperand;
46 class MachineRegisterInfo;
47 class MCAsmInfo;
48 class MCInst;
49 struct MCSchedModel;
50 class Module;
51 class ScheduleDAG;
52 class ScheduleHazardRecognizer;
53 class SDNode;
54 class SelectionDAG;
55 class RegScavenger;
56 class TargetRegisterClass;
57 class TargetRegisterInfo;
58 class TargetSchedModel;
59 class TargetSubtargetInfo;
60 
61 template <class T> class SmallVectorImpl;
62 
63 //---------------------------------------------------------------------------
64 ///
65 /// TargetInstrInfo - Interface to description of machine instruction set
66 ///
67 class TargetInstrInfo : public MCInstrInfo {
68 public:
69  TargetInstrInfo(unsigned CFSetupOpcode = ~0u, unsigned CFDestroyOpcode = ~0u,
70  unsigned CatchRetOpcode = ~0u, unsigned ReturnOpcode = ~0u)
71  : CallFrameSetupOpcode(CFSetupOpcode),
72  CallFrameDestroyOpcode(CFDestroyOpcode), CatchRetOpcode(CatchRetOpcode),
73  ReturnOpcode(ReturnOpcode) {}
74  TargetInstrInfo(const TargetInstrInfo &) = delete;
75  TargetInstrInfo &operator=(const TargetInstrInfo &) = delete;
76  virtual ~TargetInstrInfo();
77 
78  static bool isGenericOpcode(unsigned Opc) {
79  return Opc <= TargetOpcode::GENERIC_OP_END;
80  }
81 
82  /// Given a machine instruction descriptor, returns the register
83  /// class constraint for OpNum, or NULL.
84  const TargetRegisterClass *getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
85  const TargetRegisterInfo *TRI,
86  const MachineFunction &MF) const;
87 
88  /// Return true if the instruction is trivially rematerializable, meaning it
89  /// has no side effects and requires no operands that aren't always available.
90  /// This means the only allowed uses are constants and unallocatable physical
91  /// registers so that the instructions result is independent of the place
92  /// in the function.
94  AliasAnalysis *AA = nullptr) const {
95  return MI.getOpcode() == TargetOpcode::IMPLICIT_DEF ||
96  (MI.getDesc().isRematerializable() &&
98  isReallyTriviallyReMaterializableGeneric(MI, AA)));
99  }
100 
101 protected:
102  /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
103  /// set, this hook lets the target specify whether the instruction is actually
104  /// trivially rematerializable, taking into consideration its operands. This
105  /// predicate must return false if the instruction has any side effects other
106  /// than producing a value, or if it requres any address registers that are
107  /// not always available.
108  /// Requirements must be check as stated in isTriviallyReMaterializable() .
110  AliasAnalysis *AA) const {
111  return false;
112  }
113 
114  /// This method commutes the operands of the given machine instruction MI.
115  /// The operands to be commuted are specified by their indices OpIdx1 and
116  /// OpIdx2.
117  ///
118  /// If a target has any instructions that are commutable but require
119  /// converting to different instructions or making non-trivial changes
120  /// to commute them, this method can be overloaded to do that.
121  /// The default implementation simply swaps the commutable operands.
122  ///
123  /// If NewMI is false, MI is modified in place and returned; otherwise, a
124  /// new machine instruction is created and returned.
125  ///
126  /// Do not call this method for a non-commutable instruction.
127  /// Even though the instruction is commutable, the method may still
128  /// fail to commute the operands, null pointer is returned in such cases.
129  virtual MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
130  unsigned OpIdx1,
131  unsigned OpIdx2) const;
132 
133  /// Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable
134  /// operand indices to (ResultIdx1, ResultIdx2).
135  /// One or both input values of the pair: (ResultIdx1, ResultIdx2) may be
136  /// predefined to some indices or be undefined (designated by the special
137  /// value 'CommuteAnyOperandIndex').
138  /// The predefined result indices cannot be re-defined.
139  /// The function returns true iff after the result pair redefinition
140  /// the fixed result pair is equal to or equivalent to the source pair of
141  /// indices: (CommutableOpIdx1, CommutableOpIdx2). It is assumed here that
142  /// the pairs (x,y) and (y,x) are equivalent.
143  static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2,
144  unsigned CommutableOpIdx1,
145  unsigned CommutableOpIdx2);
146 
147 private:
148  /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
149  /// set and the target hook isReallyTriviallyReMaterializable returns false,
150  /// this function does target-independent tests to determine if the
151  /// instruction is really trivially rematerializable.
152  bool isReallyTriviallyReMaterializableGeneric(const MachineInstr &MI,
153  AliasAnalysis *AA) const;
154 
155 public:
156  /// These methods return the opcode of the frame setup/destroy instructions
157  /// if they exist (-1 otherwise). Some targets use pseudo instructions in
158  /// order to abstract away the difference between operating with a frame
159  /// pointer and operating without, through the use of these two instructions.
160  ///
161  unsigned getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; }
162  unsigned getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; }
163 
164  /// Returns true if the argument is a frame pseudo instruction.
165  bool isFrameInstr(const MachineInstr &I) const {
166  return I.getOpcode() == getCallFrameSetupOpcode() ||
168  }
169 
170  /// Returns true if the argument is a frame setup pseudo instruction.
171  bool isFrameSetup(const MachineInstr &I) const {
172  return I.getOpcode() == getCallFrameSetupOpcode();
173  }
174 
175  /// Returns size of the frame associated with the given frame instruction.
176  /// For frame setup instruction this is frame that is set up space set up
177  /// after the instruction. For frame destroy instruction this is the frame
178  /// freed by the caller.
179  /// Note, in some cases a call frame (or a part of it) may be prepared prior
180  /// to the frame setup instruction. It occurs in the calls that involve
181  /// inalloca arguments. This function reports only the size of the frame part
182  /// that is set up between the frame setup and destroy pseudo instructions.
183  int64_t getFrameSize(const MachineInstr &I) const {
184  assert(isFrameInstr(I) && "Not a frame instruction");
185  assert(I.getOperand(0).getImm() >= 0);
186  return I.getOperand(0).getImm();
187  }
188 
189  /// Returns the total frame size, which is made up of the space set up inside
190  /// the pair of frame start-stop instructions and the space that is set up
191  /// prior to the pair.
192  int64_t getFrameTotalSize(const MachineInstr &I) const {
193  if (isFrameSetup(I)) {
194  assert(I.getOperand(1).getImm() >= 0 &&
195  "Frame size must not be negative");
196  return getFrameSize(I) + I.getOperand(1).getImm();
197  }
198  return getFrameSize(I);
199  }
200 
201  unsigned getCatchReturnOpcode() const { return CatchRetOpcode; }
202  unsigned getReturnOpcode() const { return ReturnOpcode; }
203 
204  /// Returns the actual stack pointer adjustment made by an instruction
205  /// as part of a call sequence. By default, only call frame setup/destroy
206  /// instructions adjust the stack, but targets may want to override this
207  /// to enable more fine-grained adjustment, or adjust by a different value.
208  virtual int getSPAdjust(const MachineInstr &MI) const;
209 
210  /// Return true if the instruction is a "coalescable" extension instruction.
211  /// That is, it's like a copy where it's legal for the source to overlap the
212  /// destination. e.g. X86::MOVSX64rr32. If this returns true, then it's
213  /// expected the pre-extension value is available as a subreg of the result
214  /// register. This also returns the sub-register index in SubIdx.
215  virtual bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
216  unsigned &DstReg, unsigned &SubIdx) const {
217  return false;
218  }
219 
220  /// If the specified machine instruction is a direct
221  /// load from a stack slot, return the virtual or physical register number of
222  /// the destination along with the FrameIndex of the loaded stack slot. If
223  /// not, return 0. This predicate must return 0 if the instruction has
224  /// any side effects other than loading from the stack slot.
225  virtual unsigned isLoadFromStackSlot(const MachineInstr &MI,
226  int &FrameIndex) const {
227  return 0;
228  }
229 
230  /// Optional extension of isLoadFromStackSlot that returns the number of
231  /// bytes loaded from the stack. This must be implemented if a backend
232  /// supports partial stack slot spills/loads to further disambiguate
233  /// what the load does.
234  virtual unsigned isLoadFromStackSlot(const MachineInstr &MI,
235  int &FrameIndex,
236  unsigned &MemBytes) const {
237  MemBytes = 0;
238  return isLoadFromStackSlot(MI, FrameIndex);
239  }
240 
241  /// Check for post-frame ptr elimination stack locations as well.
242  /// This uses a heuristic so it isn't reliable for correctness.
243  virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI,
244  int &FrameIndex) const {
245  return 0;
246  }
247 
248  /// If the specified machine instruction has a load from a stack slot,
249  /// return true along with the FrameIndices of the loaded stack slot and the
250  /// machine mem operands containing the reference.
251  /// If not, return false. Unlike isLoadFromStackSlot, this returns true for
252  /// any instructions that loads from the stack. This is just a hint, as some
253  /// cases may be missed.
254  virtual bool hasLoadFromStackSlot(
255  const MachineInstr &MI,
257 
258  /// If the specified machine instruction is a direct
259  /// store to a stack slot, return the virtual or physical register number of
260  /// the source reg along with the FrameIndex of the loaded stack slot. If
261  /// not, return 0. This predicate must return 0 if the instruction has
262  /// any side effects other than storing to the stack slot.
263  virtual unsigned isStoreToStackSlot(const MachineInstr &MI,
264  int &FrameIndex) const {
265  return 0;
266  }
267 
268  /// Optional extension of isStoreToStackSlot that returns the number of
269  /// bytes stored to the stack. This must be implemented if a backend
270  /// supports partial stack slot spills/loads to further disambiguate
271  /// what the store does.
272  virtual unsigned isStoreToStackSlot(const MachineInstr &MI,
273  int &FrameIndex,
274  unsigned &MemBytes) const {
275  MemBytes = 0;
276  return isStoreToStackSlot(MI, FrameIndex);
277  }
278 
279  /// Check for post-frame ptr elimination stack locations as well.
280  /// This uses a heuristic, so it isn't reliable for correctness.
281  virtual unsigned isStoreToStackSlotPostFE(const MachineInstr &MI,
282  int &FrameIndex) const {
283  return 0;
284  }
285 
286  /// If the specified machine instruction has a store to a stack slot,
287  /// return true along with the FrameIndices of the loaded stack slot and the
288  /// machine mem operands containing the reference.
289  /// If not, return false. Unlike isStoreToStackSlot,
290  /// this returns true for any instructions that stores to the
291  /// stack. This is just a hint, as some cases may be missed.
292  virtual bool hasStoreToStackSlot(
293  const MachineInstr &MI,
295 
296  /// Return true if the specified machine instruction
297  /// is a copy of one stack slot to another and has no other effect.
298  /// Provide the identity of the two frame indices.
299  virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex,
300  int &SrcFrameIndex) const {
301  return false;
302  }
303 
304  /// Compute the size in bytes and offset within a stack slot of a spilled
305  /// register or subregister.
306  ///
307  /// \param [out] Size in bytes of the spilled value.
308  /// \param [out] Offset in bytes within the stack slot.
309  /// \returns true if both Size and Offset are successfully computed.
310  ///
311  /// Not all subregisters have computable spill slots. For example,
312  /// subregisters registers may not be byte-sized, and a pair of discontiguous
313  /// subregisters has no single offset.
314  ///
315  /// Targets with nontrivial bigendian implementations may need to override
316  /// this, particularly to support spilled vector registers.
317  virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx,
318  unsigned &Size, unsigned &Offset,
319  const MachineFunction &MF) const;
320 
321  /// Returns the size in bytes of the specified MachineInstr, or ~0U
322  /// when this function is not implemented by a target.
323  virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const {
324  return ~0U;
325  }
326 
327  /// Return true if the instruction is as cheap as a move instruction.
328  ///
329  /// Targets for different archs need to override this, and different
330  /// micro-architectures can also be finely tuned inside.
331  virtual bool isAsCheapAsAMove(const MachineInstr &MI) const {
332  return MI.isAsCheapAsAMove();
333  }
334 
335  /// Return true if the instruction should be sunk by MachineSink.
336  ///
337  /// MachineSink determines on its own whether the instruction is safe to sink;
338  /// this gives the target a hook to override the default behavior with regards
339  /// to which instructions should be sunk.
340  virtual bool shouldSink(const MachineInstr &MI) const { return true; }
341 
342  /// Re-issue the specified 'original' instruction at the
343  /// specific location targeting a new destination register.
344  /// The register in Orig->getOperand(0).getReg() will be substituted by
345  /// DestReg:SubIdx. Any existing subreg index is preserved or composed with
346  /// SubIdx.
347  virtual void reMaterialize(MachineBasicBlock &MBB,
348  MachineBasicBlock::iterator MI, unsigned DestReg,
349  unsigned SubIdx, const MachineInstr &Orig,
350  const TargetRegisterInfo &TRI) const;
351 
352  /// Clones instruction or the whole instruction bundle \p Orig and
353  /// insert into \p MBB before \p InsertBefore. The target may update operands
354  /// that are required to be unique.
355  ///
356  /// \p Orig must not return true for MachineInstr::isNotDuplicable().
358  MachineBasicBlock::iterator InsertBefore,
359  const MachineInstr &Orig) const;
360 
361  /// This method must be implemented by targets that
362  /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
363  /// may be able to convert a two-address instruction into one or more true
364  /// three-address instructions on demand. This allows the X86 target (for
365  /// example) to convert ADD and SHL instructions into LEA instructions if they
366  /// would require register copies due to two-addressness.
367  ///
368  /// This method returns a null pointer if the transformation cannot be
369  /// performed, otherwise it returns the last new instruction.
370  ///
372  MachineInstr &MI,
373  LiveVariables *LV) const {
374  return nullptr;
375  }
376 
377  // This constant can be used as an input value of operand index passed to
378  // the method findCommutedOpIndices() to tell the method that the
379  // corresponding operand index is not pre-defined and that the method
380  // can pick any commutable operand.
381  static const unsigned CommuteAnyOperandIndex = ~0U;
382 
383  /// This method commutes the operands of the given machine instruction MI.
384  ///
385  /// The operands to be commuted are specified by their indices OpIdx1 and
386  /// OpIdx2. OpIdx1 and OpIdx2 arguments may be set to a special value
387  /// 'CommuteAnyOperandIndex', which means that the method is free to choose
388  /// any arbitrarily chosen commutable operand. If both arguments are set to
389  /// 'CommuteAnyOperandIndex' then the method looks for 2 different commutable
390  /// operands; then commutes them if such operands could be found.
391  ///
392  /// If NewMI is false, MI is modified in place and returned; otherwise, a
393  /// new machine instruction is created and returned.
394  ///
395  /// Do not call this method for a non-commutable instruction or
396  /// for non-commuable operands.
397  /// Even though the instruction is commutable, the method may still
398  /// fail to commute the operands, null pointer is returned in such cases.
399  MachineInstr *
400  commuteInstruction(MachineInstr &MI, bool NewMI = false,
401  unsigned OpIdx1 = CommuteAnyOperandIndex,
402  unsigned OpIdx2 = CommuteAnyOperandIndex) const;
403 
404  /// Returns true iff the routine could find two commutable operands in the
405  /// given machine instruction.
406  /// The 'SrcOpIdx1' and 'SrcOpIdx2' are INPUT and OUTPUT arguments.
407  /// If any of the INPUT values is set to the special value
408  /// 'CommuteAnyOperandIndex' then the method arbitrarily picks a commutable
409  /// operand, then returns its index in the corresponding argument.
410  /// If both of INPUT values are set to 'CommuteAnyOperandIndex' then method
411  /// looks for 2 commutable operands.
412  /// If INPUT values refer to some operands of MI, then the method simply
413  /// returns true if the corresponding operands are commutable and returns
414  /// false otherwise.
415  ///
416  /// For example, calling this method this way:
417  /// unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
418  /// findCommutedOpIndices(MI, Op1, Op2);
419  /// can be interpreted as a query asking to find an operand that would be
420  /// commutable with the operand#1.
421  virtual bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
422  unsigned &SrcOpIdx2) const;
423 
424  /// A pair composed of a register and a sub-register index.
425  /// Used to give some type checking when modeling Reg:SubReg.
426  struct RegSubRegPair {
427  unsigned Reg;
428  unsigned SubReg;
429 
430  RegSubRegPair(unsigned Reg = 0, unsigned SubReg = 0)
431  : Reg(Reg), SubReg(SubReg) {}
432  };
433 
434  /// A pair composed of a pair of a register and a sub-register index,
435  /// and another sub-register index.
436  /// Used to give some type checking when modeling Reg:SubReg1, SubReg2.
438  unsigned SubIdx;
439 
440  RegSubRegPairAndIdx(unsigned Reg = 0, unsigned SubReg = 0,
441  unsigned SubIdx = 0)
442  : RegSubRegPair(Reg, SubReg), SubIdx(SubIdx) {}
443  };
444 
445  /// Build the equivalent inputs of a REG_SEQUENCE for the given \p MI
446  /// and \p DefIdx.
447  /// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of
448  /// the list is modeled as <Reg:SubReg, SubIdx>. Operands with the undef
449  /// flag are not added to this list.
450  /// E.g., REG_SEQUENCE %1:sub1, sub0, %2, sub1 would produce
451  /// two elements:
452  /// - %1:sub1, sub0
453  /// - %2<:0>, sub1
454  ///
455  /// \returns true if it is possible to build such an input sequence
456  /// with the pair \p MI, \p DefIdx. False otherwise.
457  ///
458  /// \pre MI.isRegSequence() or MI.isRegSequenceLike().
459  ///
460  /// \note The generic implementation does not provide any support for
461  /// MI.isRegSequenceLike(). In other words, one has to override
462  /// getRegSequenceLikeInputs for target specific instructions.
463  bool
464  getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx,
465  SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const;
466 
467  /// Build the equivalent inputs of a EXTRACT_SUBREG for the given \p MI
468  /// and \p DefIdx.
469  /// \p [out] InputReg of the equivalent EXTRACT_SUBREG.
470  /// E.g., EXTRACT_SUBREG %1:sub1, sub0, sub1 would produce:
471  /// - %1:sub1, sub0
472  ///
473  /// \returns true if it is possible to build such an input sequence
474  /// with the pair \p MI, \p DefIdx and the operand has no undef flag set.
475  /// False otherwise.
476  ///
477  /// \pre MI.isExtractSubreg() or MI.isExtractSubregLike().
478  ///
479  /// \note The generic implementation does not provide any support for
480  /// MI.isExtractSubregLike(). In other words, one has to override
481  /// getExtractSubregLikeInputs for target specific instructions.
482  bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx,
483  RegSubRegPairAndIdx &InputReg) const;
484 
485  /// Build the equivalent inputs of a INSERT_SUBREG for the given \p MI
486  /// and \p DefIdx.
487  /// \p [out] BaseReg and \p [out] InsertedReg contain
488  /// the equivalent inputs of INSERT_SUBREG.
489  /// E.g., INSERT_SUBREG %0:sub0, %1:sub1, sub3 would produce:
490  /// - BaseReg: %0:sub0
491  /// - InsertedReg: %1:sub1, sub3
492  ///
493  /// \returns true if it is possible to build such an input sequence
494  /// with the pair \p MI, \p DefIdx and the operand has no undef flag set.
495  /// False otherwise.
496  ///
497  /// \pre MI.isInsertSubreg() or MI.isInsertSubregLike().
498  ///
499  /// \note The generic implementation does not provide any support for
500  /// MI.isInsertSubregLike(). In other words, one has to override
501  /// getInsertSubregLikeInputs for target specific instructions.
502  bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx,
503  RegSubRegPair &BaseReg,
504  RegSubRegPairAndIdx &InsertedReg) const;
505 
506  /// Return true if two machine instructions would produce identical values.
507  /// By default, this is only true when the two instructions
508  /// are deemed identical except for defs. If this function is called when the
509  /// IR is still in SSA form, the caller can pass the MachineRegisterInfo for
510  /// aggressive checks.
511  virtual bool produceSameValue(const MachineInstr &MI0,
512  const MachineInstr &MI1,
513  const MachineRegisterInfo *MRI = nullptr) const;
514 
515  /// \returns true if a branch from an instruction with opcode \p BranchOpc
516  /// bytes is capable of jumping to a position \p BrOffset bytes away.
517  virtual bool isBranchOffsetInRange(unsigned BranchOpc,
518  int64_t BrOffset) const {
519  llvm_unreachable("target did not implement");
520  }
521 
522  /// \returns The block that branch instruction \p MI jumps to.
524  llvm_unreachable("target did not implement");
525  }
526 
527  /// Insert an unconditional indirect branch at the end of \p MBB to \p
528  /// NewDestBB. \p BrOffset indicates the offset of \p NewDestBB relative to
529  /// the offset of the position to insert the new branch.
530  ///
531  /// \returns The number of bytes added to the block.
533  MachineBasicBlock &NewDestBB,
534  const DebugLoc &DL,
535  int64_t BrOffset = 0,
536  RegScavenger *RS = nullptr) const {
537  llvm_unreachable("target did not implement");
538  }
539 
540  /// Analyze the branching code at the end of MBB, returning
541  /// true if it cannot be understood (e.g. it's a switch dispatch or isn't
542  /// implemented for a target). Upon success, this returns false and returns
543  /// with the following information in various cases:
544  ///
545  /// 1. If this block ends with no branches (it just falls through to its succ)
546  /// just return false, leaving TBB/FBB null.
547  /// 2. If this block ends with only an unconditional branch, it sets TBB to be
548  /// the destination block.
549  /// 3. If this block ends with a conditional branch and it falls through to a
550  /// successor block, it sets TBB to be the branch destination block and a
551  /// list of operands that evaluate the condition. These operands can be
552  /// passed to other TargetInstrInfo methods to create new branches.
553  /// 4. If this block ends with a conditional branch followed by an
554  /// unconditional branch, it returns the 'true' destination in TBB, the
555  /// 'false' destination in FBB, and a list of operands that evaluate the
556  /// condition. These operands can be passed to other TargetInstrInfo
557  /// methods to create new branches.
558  ///
559  /// Note that removeBranch and insertBranch must be implemented to support
560  /// cases where this method returns success.
561  ///
562  /// If AllowModify is true, then this routine is allowed to modify the basic
563  /// block (e.g. delete instructions after the unconditional branch).
564  ///
565  /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
566  /// before calling this function.
568  MachineBasicBlock *&FBB,
570  bool AllowModify = false) const {
571  return true;
572  }
573 
574  /// Represents a predicate at the MachineFunction level. The control flow a
575  /// MachineBranchPredicate represents is:
576  ///
577  /// Reg = LHS `Predicate` RHS == ConditionDef
578  /// if Reg then goto TrueDest else goto FalseDest
579  ///
582  PRED_EQ, // True if two values are equal
583  PRED_NE, // True if two values are not equal
584  PRED_INVALID // Sentinel value
585  };
586 
587  ComparePredicate Predicate = PRED_INVALID;
590  MachineBasicBlock *TrueDest = nullptr;
591  MachineBasicBlock *FalseDest = nullptr;
592  MachineInstr *ConditionDef = nullptr;
593 
594  /// SingleUseCondition is true if ConditionDef is dead except for the
595  /// branch(es) at the end of the basic block.
596  ///
597  bool SingleUseCondition = false;
598 
599  explicit MachineBranchPredicate() = default;
600  };
601 
602  /// Analyze the branching code at the end of MBB and parse it into the
603  /// MachineBranchPredicate structure if possible. Returns false on success
604  /// and true on failure.
605  ///
606  /// If AllowModify is true, then this routine is allowed to modify the basic
607  /// block (e.g. delete instructions after the unconditional branch).
608  ///
611  bool AllowModify = false) const {
612  return true;
613  }
614 
615  /// Remove the branching code at the end of the specific MBB.
616  /// This is only invoked in cases where AnalyzeBranch returns success. It
617  /// returns the number of instructions that were removed.
618  /// If \p BytesRemoved is non-null, report the change in code size from the
619  /// removed instructions.
620  virtual unsigned removeBranch(MachineBasicBlock &MBB,
621  int *BytesRemoved = nullptr) const {
622  llvm_unreachable("Target didn't implement TargetInstrInfo::removeBranch!");
623  }
624 
625  /// Insert branch code into the end of the specified MachineBasicBlock. The
626  /// operands to this method are the same as those returned by AnalyzeBranch.
627  /// This is only invoked in cases where AnalyzeBranch returns success. It
628  /// returns the number of instructions inserted. If \p BytesAdded is non-null,
629  /// report the change in code size from the added instructions.
630  ///
631  /// It is also invoked by tail merging to add unconditional branches in
632  /// cases where AnalyzeBranch doesn't apply because there was no original
633  /// branch to analyze. At least this much must be implemented, else tail
634  /// merging needs to be disabled.
635  ///
636  /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
637  /// before calling this function.
638  virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
639  MachineBasicBlock *FBB,
641  const DebugLoc &DL,
642  int *BytesAdded = nullptr) const {
643  llvm_unreachable("Target didn't implement TargetInstrInfo::insertBranch!");
644  }
645 
647  MachineBasicBlock *DestBB,
648  const DebugLoc &DL,
649  int *BytesAdded = nullptr) const {
650  return insertBranch(MBB, DestBB, nullptr, ArrayRef<MachineOperand>(), DL,
651  BytesAdded);
652  }
653 
654  /// Analyze the loop code, return true if it cannot be understoo. Upon
655  /// success, this function returns false and returns information about the
656  /// induction variable and compare instruction used at the end.
657  virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst,
658  MachineInstr *&CmpInst) const {
659  return true;
660  }
661 
662  /// Generate code to reduce the loop iteration by one and check if the loop
663  /// is finished. Return the value/register of the new loop count. We need
664  /// this function when peeling off one or more iterations of a loop. This
665  /// function assumes the nth iteration is peeled first.
666  virtual unsigned reduceLoopCount(MachineBasicBlock &MBB, MachineInstr *IndVar,
667  MachineInstr &Cmp,
670  unsigned Iter, unsigned MaxIter) const {
671  llvm_unreachable("Target didn't implement ReduceLoopCount");
672  }
673 
674  /// Delete the instruction OldInst and everything after it, replacing it with
675  /// an unconditional branch to NewDest. This is used by the tail merging pass.
677  MachineBasicBlock *NewDest) const;
678 
679  /// Return true if it's legal to split the given basic
680  /// block at the specified instruction (i.e. instruction would be the start
681  /// of a new basic block).
683  MachineBasicBlock::iterator MBBI) const {
684  return true;
685  }
686 
687  /// Return true if it's profitable to predicate
688  /// instructions with accumulated instruction latency of "NumCycles"
689  /// of the specified basic block, where the probability of the instructions
690  /// being executed is given by Probability, and Confidence is a measure
691  /// of our confidence that it will be properly predicted.
692  virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
693  unsigned ExtraPredCycles,
694  BranchProbability Probability) const {
695  return false;
696  }
697 
698  /// Second variant of isProfitableToIfCvt. This one
699  /// checks for the case where two basic blocks from true and false path
700  /// of a if-then-else (diamond) are predicated on mutally exclusive
701  /// predicates, where the probability of the true path being taken is given
702  /// by Probability, and Confidence is a measure of our confidence that it
703  /// will be properly predicted.
704  virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTCycles,
705  unsigned ExtraTCycles,
706  MachineBasicBlock &FMBB, unsigned NumFCycles,
707  unsigned ExtraFCycles,
708  BranchProbability Probability) const {
709  return false;
710  }
711 
712  /// Return true if it's profitable for if-converter to duplicate instructions
713  /// of specified accumulated instruction latencies in the specified MBB to
714  /// enable if-conversion.
715  /// The probability of the instructions being executed is given by
716  /// Probability, and Confidence is a measure of our confidence that it
717  /// will be properly predicted.
719  unsigned NumCycles,
720  BranchProbability Probability) const {
721  return false;
722  }
723 
724  /// Return true if it's profitable to unpredicate
725  /// one side of a 'diamond', i.e. two sides of if-else predicated on mutually
726  /// exclusive predicates.
727  /// e.g.
728  /// subeq r0, r1, #1
729  /// addne r0, r1, #1
730  /// =>
731  /// sub r0, r1, #1
732  /// addne r0, r1, #1
733  ///
734  /// This may be profitable is conditional instructions are always executed.
736  MachineBasicBlock &FMBB) const {
737  return false;
738  }
739 
740  /// Return true if it is possible to insert a select
741  /// instruction that chooses between TrueReg and FalseReg based on the
742  /// condition code in Cond.
743  ///
744  /// When successful, also return the latency in cycles from TrueReg,
745  /// FalseReg, and Cond to the destination register. In most cases, a select
746  /// instruction will be 1 cycle, so CondCycles = TrueCycles = FalseCycles = 1
747  ///
748  /// Some x86 implementations have 2-cycle cmov instructions.
749  ///
750  /// @param MBB Block where select instruction would be inserted.
751  /// @param Cond Condition returned by AnalyzeBranch.
752  /// @param TrueReg Virtual register to select when Cond is true.
753  /// @param FalseReg Virtual register to select when Cond is false.
754  /// @param CondCycles Latency from Cond+Branch to select output.
755  /// @param TrueCycles Latency from TrueReg to select output.
756  /// @param FalseCycles Latency from FalseReg to select output.
757  virtual bool canInsertSelect(const MachineBasicBlock &MBB,
758  ArrayRef<MachineOperand> Cond, unsigned TrueReg,
759  unsigned FalseReg, int &CondCycles,
760  int &TrueCycles, int &FalseCycles) const {
761  return false;
762  }
763 
764  /// Insert a select instruction into MBB before I that will copy TrueReg to
765  /// DstReg when Cond is true, and FalseReg to DstReg when Cond is false.
766  ///
767  /// This function can only be called after canInsertSelect() returned true.
768  /// The condition in Cond comes from AnalyzeBranch, and it can be assumed
769  /// that the same flags or registers required by Cond are available at the
770  /// insertion point.
771  ///
772  /// @param MBB Block where select instruction should be inserted.
773  /// @param I Insertion point.
774  /// @param DL Source location for debugging.
775  /// @param DstReg Virtual register to be defined by select instruction.
776  /// @param Cond Condition as computed by AnalyzeBranch.
777  /// @param TrueReg Virtual register to copy when Cond is true.
778  /// @param FalseReg Virtual register to copy when Cons is false.
779  virtual void insertSelect(MachineBasicBlock &MBB,
781  unsigned DstReg, ArrayRef<MachineOperand> Cond,
782  unsigned TrueReg, unsigned FalseReg) const {
783  llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!");
784  }
785 
786  /// Analyze the given select instruction, returning true if
787  /// it cannot be understood. It is assumed that MI->isSelect() is true.
788  ///
789  /// When successful, return the controlling condition and the operands that
790  /// determine the true and false result values.
791  ///
792  /// Result = SELECT Cond, TrueOp, FalseOp
793  ///
794  /// Some targets can optimize select instructions, for example by predicating
795  /// the instruction defining one of the operands. Such targets should set
796  /// Optimizable.
797  ///
798  /// @param MI Select instruction to analyze.
799  /// @param Cond Condition controlling the select.
800  /// @param TrueOp Operand number of the value selected when Cond is true.
801  /// @param FalseOp Operand number of the value selected when Cond is false.
802  /// @param Optimizable Returned as true if MI is optimizable.
803  /// @returns False on success.
804  virtual bool analyzeSelect(const MachineInstr &MI,
806  unsigned &TrueOp, unsigned &FalseOp,
807  bool &Optimizable) const {
808  assert(MI.getDesc().isSelect() && "MI must be a select instruction");
809  return true;
810  }
811 
812  /// Given a select instruction that was understood by
813  /// analyzeSelect and returned Optimizable = true, attempt to optimize MI by
814  /// merging it with one of its operands. Returns NULL on failure.
815  ///
816  /// When successful, returns the new select instruction. The client is
817  /// responsible for deleting MI.
818  ///
819  /// If both sides of the select can be optimized, PreferFalse is used to pick
820  /// a side.
821  ///
822  /// @param MI Optimizable select instruction.
823  /// @param NewMIs Set that record all MIs in the basic block up to \p
824  /// MI. Has to be updated with any newly created MI or deleted ones.
825  /// @param PreferFalse Try to optimize FalseOp instead of TrueOp.
826  /// @returns Optimized instruction or NULL.
829  bool PreferFalse = false) const {
830  // This function must be implemented if Optimizable is ever set.
831  llvm_unreachable("Target must implement TargetInstrInfo::optimizeSelect!");
832  }
833 
834  /// Emit instructions to copy a pair of physical registers.
835  ///
836  /// This function should support copies within any legal register class as
837  /// well as any cross-class copies created during instruction selection.
838  ///
839  /// The source and destination registers may overlap, which may require a
840  /// careful implementation when multiple copy instructions are required for
841  /// large registers. See for example the ARM target.
842  virtual void copyPhysReg(MachineBasicBlock &MBB,
844  unsigned DestReg, unsigned SrcReg,
845  bool KillSrc) const {
846  llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!");
847  }
848 
849 protected:
850  /// Target-dependent implemenation for IsCopyInstr.
851  /// If the specific machine instruction is a instruction that moves/copies
852  /// value from one register to another register return true along with
853  /// @Source machine operand and @Destination machine operand.
854  virtual bool isCopyInstrImpl(const MachineInstr &MI,
855  const MachineOperand *&Source,
856  const MachineOperand *&Destination) const {
857  return false;
858  }
859 
860 public:
861  /// If the specific machine instruction is a instruction that moves/copies
862  /// value from one register to another register return true along with
863  /// @Source machine operand and @Destination machine operand.
864  /// For COPY-instruction the method naturally returns true, for all other
865  /// instructions the method calls target-dependent implementation.
867  const MachineOperand *&Destination) const {
868  if (MI.isCopy()) {
869  Destination = &MI.getOperand(0);
870  Source = &MI.getOperand(1);
871  return true;
872  }
873  return isCopyInstrImpl(MI, Source, Destination);
874  }
875 
876  /// Store the specified register of the given register class to the specified
877  /// stack frame index. The store instruction is to be added to the given
878  /// machine basic block before the specified machine instruction. If isKill
879  /// is true, the register operand is the last use and must be marked kill.
882  unsigned SrcReg, bool isKill, int FrameIndex,
883  const TargetRegisterClass *RC,
884  const TargetRegisterInfo *TRI) const {
885  llvm_unreachable("Target didn't implement "
886  "TargetInstrInfo::storeRegToStackSlot!");
887  }
888 
889  /// Load the specified register of the given register class from the specified
890  /// stack frame index. The load instruction is to be added to the given
891  /// machine basic block before the specified machine instruction.
894  unsigned DestReg, int FrameIndex,
895  const TargetRegisterClass *RC,
896  const TargetRegisterInfo *TRI) const {
897  llvm_unreachable("Target didn't implement "
898  "TargetInstrInfo::loadRegFromStackSlot!");
899  }
900 
901  /// This function is called for all pseudo instructions
902  /// that remain after register allocation. Many pseudo instructions are
903  /// created to help register allocation. This is the place to convert them
904  /// into real instructions. The target can edit MI in place, or it can insert
905  /// new instructions and erase MI. The function should return true if
906  /// anything was changed.
907  virtual bool expandPostRAPseudo(MachineInstr &MI) const { return false; }
908 
909  /// Check whether the target can fold a load that feeds a subreg operand
910  /// (or a subreg operand that feeds a store).
911  /// For example, X86 may want to return true if it can fold
912  /// movl (%esp), %eax
913  /// subb, %al, ...
914  /// Into:
915  /// subb (%esp), ...
916  ///
917  /// Ideally, we'd like the target implementation of foldMemoryOperand() to
918  /// reject subregs - but since this behavior used to be enforced in the
919  /// target-independent code, moving this responsibility to the targets
920  /// has the potential of causing nasty silent breakage in out-of-tree targets.
921  virtual bool isSubregFoldable() const { return false; }
922 
923  /// Attempt to fold a load or store of the specified stack
924  /// slot into the specified machine instruction for the specified operand(s).
925  /// If this is possible, a new instruction is returned with the specified
926  /// operand folded, otherwise NULL is returned.
927  /// The new instruction is inserted before MI, and the client is responsible
928  /// for removing the old instruction.
930  int FI,
931  LiveIntervals *LIS = nullptr) const;
932 
933  /// Same as the previous version except it allows folding of any load and
934  /// store from / to any address, not just from a specific stack slot.
936  MachineInstr &LoadMI,
937  LiveIntervals *LIS = nullptr) const;
938 
939  /// Return true when there is potentially a faster code sequence
940  /// for an instruction chain ending in \p Root. All potential patterns are
941  /// returned in the \p Pattern vector. Pattern should be sorted in priority
942  /// order since the pattern evaluator stops checking as soon as it finds a
943  /// faster sequence.
944  /// \param Root - Instruction that could be combined with one of its operands
945  /// \param Patterns - Vector of possible combination patterns
946  virtual bool getMachineCombinerPatterns(
947  MachineInstr &Root,
949 
950  /// Return true when a code sequence can improve throughput. It
951  /// should be called only for instructions in loops.
952  /// \param Pattern - combiner pattern
953  virtual bool isThroughputPattern(MachineCombinerPattern Pattern) const;
954 
955  /// Return true if the input \P Inst is part of a chain of dependent ops
956  /// that are suitable for reassociation, otherwise return false.
957  /// If the instruction's operands must be commuted to have a previous
958  /// instruction of the same type define the first source operand, \P Commuted
959  /// will be set to true.
960  bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const;
961 
962  /// Return true when \P Inst is both associative and commutative.
963  virtual bool isAssociativeAndCommutative(const MachineInstr &Inst) const {
964  return false;
965  }
966 
967  /// Return true when \P Inst has reassociable operands in the same \P MBB.
968  virtual bool hasReassociableOperands(const MachineInstr &Inst,
969  const MachineBasicBlock *MBB) const;
970 
971  /// Return true when \P Inst has reassociable sibling.
972  bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const;
973 
974  /// When getMachineCombinerPatterns() finds patterns, this function generates
975  /// the instructions that could replace the original code sequence. The client
976  /// has to decide whether the actual replacement is beneficial or not.
977  /// \param Root - Instruction that could be combined with one of its operands
978  /// \param Pattern - Combination pattern for Root
979  /// \param InsInstrs - Vector of new instructions that implement P
980  /// \param DelInstrs - Old instructions, including Root, that could be
981  /// replaced by InsInstr
982  /// \param InstIdxForVirtReg - map of virtual register to instruction in
983  /// InsInstr that defines it
984  virtual void genAlternativeCodeSequence(
985  MachineInstr &Root, MachineCombinerPattern Pattern,
988  DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const;
989 
990  /// Attempt to reassociate \P Root and \P Prev according to \P Pattern to
991  /// reduce critical path length.
992  void reassociateOps(MachineInstr &Root, MachineInstr &Prev,
993  MachineCombinerPattern Pattern,
996  DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const;
997 
998  /// This is an architecture-specific helper function of reassociateOps.
999  /// Set special operand attributes for new instructions after reassociation.
1000  virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2,
1001  MachineInstr &NewMI1,
1002  MachineInstr &NewMI2) const {}
1003 
1004  /// Return true when a target supports MachineCombiner.
1005  virtual bool useMachineCombiner() const { return false; }
1006 
1007  /// Return true if the given SDNode can be copied during scheduling
1008  /// even if it has glue.
1009  virtual bool canCopyGluedNodeDuringSchedule(SDNode *N) const { return false; }
1010 
1011 protected:
1012  /// Target-dependent implementation for foldMemoryOperand.
1013  /// Target-independent code in foldMemoryOperand will
1014  /// take care of adding a MachineMemOperand to the newly created instruction.
1015  /// The instruction and any auxiliary instructions necessary will be inserted
1016  /// at InsertPt.
1017  virtual MachineInstr *
1019  ArrayRef<unsigned> Ops,
1021  LiveIntervals *LIS = nullptr) const {
1022  return nullptr;
1023  }
1024 
1025  /// Target-dependent implementation for foldMemoryOperand.
1026  /// Target-independent code in foldMemoryOperand will
1027  /// take care of adding a MachineMemOperand to the newly created instruction.
1028  /// The instruction and any auxiliary instructions necessary will be inserted
1029  /// at InsertPt.
1032  MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
1033  LiveIntervals *LIS = nullptr) const {
1034  return nullptr;
1035  }
1036 
1037  /// Target-dependent implementation of getRegSequenceInputs.
1038  ///
1039  /// \returns true if it is possible to build the equivalent
1040  /// REG_SEQUENCE inputs with the pair \p MI, \p DefIdx. False otherwise.
1041  ///
1042  /// \pre MI.isRegSequenceLike().
1043  ///
1044  /// \see TargetInstrInfo::getRegSequenceInputs.
1046  const MachineInstr &MI, unsigned DefIdx,
1047  SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
1048  return false;
1049  }
1050 
1051  /// Target-dependent implementation of getExtractSubregInputs.
1052  ///
1053  /// \returns true if it is possible to build the equivalent
1054  /// EXTRACT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
1055  ///
1056  /// \pre MI.isExtractSubregLike().
1057  ///
1058  /// \see TargetInstrInfo::getExtractSubregInputs.
1060  unsigned DefIdx,
1061  RegSubRegPairAndIdx &InputReg) const {
1062  return false;
1063  }
1064 
1065  /// Target-dependent implementation of getInsertSubregInputs.
1066  ///
1067  /// \returns true if it is possible to build the equivalent
1068  /// INSERT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
1069  ///
1070  /// \pre MI.isInsertSubregLike().
1071  ///
1072  /// \see TargetInstrInfo::getInsertSubregInputs.
1073  virtual bool
1074  getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx,
1075  RegSubRegPair &BaseReg,
1076  RegSubRegPairAndIdx &InsertedReg) const {
1077  return false;
1078  }
1079 
1080 public:
1081  /// getAddressSpaceForPseudoSourceKind - Given the kind of memory
1082  /// (e.g. stack) the target returns the corresponding address space.
1083  virtual unsigned
1085  return 0;
1086  }
1087 
1088  /// unfoldMemoryOperand - Separate a single instruction which folded a load or
1089  /// a store or a load and a store into two or more instruction. If this is
1090  /// possible, returns true as well as the new instructions by reference.
1091  virtual bool
1093  bool UnfoldLoad, bool UnfoldStore,
1094  SmallVectorImpl<MachineInstr *> &NewMIs) const {
1095  return false;
1096  }
1097 
1099  SmallVectorImpl<SDNode *> &NewNodes) const {
1100  return false;
1101  }
1102 
1103  /// Returns the opcode of the would be new
1104  /// instruction after load / store are unfolded from an instruction of the
1105  /// specified opcode. It returns zero if the specified unfolding is not
1106  /// possible. If LoadRegIndex is non-null, it is filled in with the operand
1107  /// index of the operand which will hold the register holding the loaded
1108  /// value.
1109  virtual unsigned
1110  getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore,
1111  unsigned *LoadRegIndex = nullptr) const {
1112  return 0;
1113  }
1114 
1115  /// This is used by the pre-regalloc scheduler to determine if two loads are
1116  /// loading from the same base address. It should only return true if the base
1117  /// pointers are the same and the only differences between the two addresses
1118  /// are the offset. It also returns the offsets by reference.
1119  virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
1120  int64_t &Offset1,
1121  int64_t &Offset2) const {
1122  return false;
1123  }
1124 
1125  /// This is a used by the pre-regalloc scheduler to determine (in conjunction
1126  /// with areLoadsFromSameBasePtr) if two loads should be scheduled together.
1127  /// On some targets if two loads are loading from
1128  /// addresses in the same cache line, it's better if they are scheduled
1129  /// together. This function takes two integers that represent the load offsets
1130  /// from the common base address. It returns true if it decides it's desirable
1131  /// to schedule the two loads together. "NumLoads" is the number of loads that
1132  /// have already been scheduled after Load1.
1133  virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
1134  int64_t Offset1, int64_t Offset2,
1135  unsigned NumLoads) const {
1136  return false;
1137  }
1138 
1139  /// Get the base operand and byte offset of an instruction that reads/writes
1140  /// memory.
1142  MachineOperand *&BaseOp, int64_t &Offset,
1143  const TargetRegisterInfo *TRI) const {
1144  return false;
1145  }
1146 
1147  /// Return true if the instruction contains a base register and offset. If
1148  /// true, the function also sets the operand position in the instruction
1149  /// for the base register and offset.
1150  virtual bool getBaseAndOffsetPosition(const MachineInstr &MI,
1151  unsigned &BasePos,
1152  unsigned &OffsetPos) const {
1153  return false;
1154  }
1155 
1156  /// If the instruction is an increment of a constant value, return the amount.
1157  virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const {
1158  return false;
1159  }
1160 
1161  /// Returns true if the two given memory operations should be scheduled
1162  /// adjacent. Note that you have to add:
1163  /// DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
1164  /// or
1165  /// DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
1166  /// to TargetPassConfig::createMachineScheduler() to have an effect.
1167  virtual bool shouldClusterMemOps(MachineOperand &BaseOp1,
1168  MachineOperand &BaseOp2,
1169  unsigned NumLoads) const {
1170  llvm_unreachable("target did not implement shouldClusterMemOps()");
1171  }
1172 
1173  /// Reverses the branch condition of the specified condition list,
1174  /// returning false on success and true if it cannot be reversed.
1175  virtual bool
1177  return true;
1178  }
1179 
1180  /// Insert a noop into the instruction stream at the specified point.
1181  virtual void insertNoop(MachineBasicBlock &MBB,
1182  MachineBasicBlock::iterator MI) const;
1183 
1184  /// Return the noop instruction to use for a noop.
1185  virtual void getNoop(MCInst &NopInst) const;
1186 
1187  /// Return true for post-incremented instructions.
1188  virtual bool isPostIncrement(const MachineInstr &MI) const { return false; }
1189 
1190  /// Returns true if the instruction is already predicated.
1191  virtual bool isPredicated(const MachineInstr &MI) const { return false; }
1192 
1193  /// Returns true if the instruction is a
1194  /// terminator instruction that has not been predicated.
1195  virtual bool isUnpredicatedTerminator(const MachineInstr &MI) const;
1196 
1197  /// Returns true if MI is an unconditional tail call.
1198  virtual bool isUnconditionalTailCall(const MachineInstr &MI) const {
1199  return false;
1200  }
1201 
1202  /// Returns true if the tail call can be made conditional on BranchCond.
1204  const MachineInstr &TailCall) const {
1205  return false;
1206  }
1207 
1208  /// Replace the conditional branch in MBB with a conditional tail call.
1211  const MachineInstr &TailCall) const {
1212  llvm_unreachable("Target didn't implement replaceBranchWithTailCall!");
1213  }
1214 
1215  /// Convert the instruction into a predicated instruction.
1216  /// It returns true if the operation was successful.
1217  virtual bool PredicateInstruction(MachineInstr &MI,
1218  ArrayRef<MachineOperand> Pred) const;
1219 
1220  /// Returns true if the first specified predicate
1221  /// subsumes the second, e.g. GE subsumes GT.
1223  ArrayRef<MachineOperand> Pred2) const {
1224  return false;
1225  }
1226 
1227  /// If the specified instruction defines any predicate
1228  /// or condition code register(s) used for predication, returns true as well
1229  /// as the definition predicate(s) by reference.
1230  virtual bool DefinesPredicate(MachineInstr &MI,
1231  std::vector<MachineOperand> &Pred) const {
1232  return false;
1233  }
1234 
1235  /// Return true if the specified instruction can be predicated.
1236  /// By default, this returns true for every instruction with a
1237  /// PredicateOperand.
1238  virtual bool isPredicable(const MachineInstr &MI) const {
1239  return MI.getDesc().isPredicable();
1240  }
1241 
1242  /// Return true if it's safe to move a machine
1243  /// instruction that defines the specified register class.
1244  virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
1245  return true;
1246  }
1247 
1248  /// Test if the given instruction should be considered a scheduling boundary.
1249  /// This primarily includes labels and terminators.
1250  virtual bool isSchedulingBoundary(const MachineInstr &MI,
1251  const MachineBasicBlock *MBB,
1252  const MachineFunction &MF) const;
1253 
1254  /// Measure the specified inline asm to determine an approximation of its
1255  /// length.
1256  virtual unsigned getInlineAsmLength(const char *Str,
1257  const MCAsmInfo &MAI) const;
1258 
1259  /// Allocate and return a hazard recognizer to use for this target when
1260  /// scheduling the machine instructions before register allocation.
1261  virtual ScheduleHazardRecognizer *
1263  const ScheduleDAG *DAG) const;
1264 
1265  /// Allocate and return a hazard recognizer to use for this target when
1266  /// scheduling the machine instructions before register allocation.
1267  virtual ScheduleHazardRecognizer *
1269  const ScheduleDAG *DAG) const;
1270 
1271  /// Allocate and return a hazard recognizer to use for this target when
1272  /// scheduling the machine instructions after register allocation.
1273  virtual ScheduleHazardRecognizer *
1275  const ScheduleDAG *DAG) const;
1276 
1277  /// Allocate and return a hazard recognizer to use for by non-scheduling
1278  /// passes.
1279  virtual ScheduleHazardRecognizer *
1281  return nullptr;
1282  }
1283 
1284  /// Provide a global flag for disabling the PreRA hazard recognizer that
1285  /// targets may choose to honor.
1286  bool usePreRAHazardRecognizer() const;
1287 
1288  /// For a comparison instruction, return the source registers
1289  /// in SrcReg and SrcReg2 if having two register operands, and the value it
1290  /// compares against in CmpValue. Return true if the comparison instruction
1291  /// can be analyzed.
1292  virtual bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
1293  unsigned &SrcReg2, int &Mask, int &Value) const {
1294  return false;
1295  }
1296 
1297  /// See if the comparison instruction can be converted
1298  /// into something more efficient. E.g., on ARM most instructions can set the
1299  /// flags register, obviating the need for a separate CMP.
1300  virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
1301  unsigned SrcReg2, int Mask, int Value,
1302  const MachineRegisterInfo *MRI) const {
1303  return false;
1304  }
1305  virtual bool optimizeCondBranch(MachineInstr &MI) const { return false; }
1306 
1307  /// Try to remove the load by folding it to a register operand at the use.
1308  /// We fold the load instructions if and only if the
1309  /// def and use are in the same BB. We only look at one load and see
1310  /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register
1311  /// defined by the load we are trying to fold. DefMI returns the machine
1312  /// instruction that defines FoldAsLoadDefReg, and the function returns
1313  /// the machine instruction generated due to folding.
1315  const MachineRegisterInfo *MRI,
1316  unsigned &FoldAsLoadDefReg,
1317  MachineInstr *&DefMI) const {
1318  return nullptr;
1319  }
1320 
1321  /// 'Reg' is known to be defined by a move immediate instruction,
1322  /// try to fold the immediate into the use instruction.
1323  /// If MRI->hasOneNonDBGUse(Reg) is true, and this function returns true,
1324  /// then the caller may assume that DefMI has been erased from its parent
1325  /// block. The caller may assume that it will not be erased by this
1326  /// function otherwise.
1328  unsigned Reg, MachineRegisterInfo *MRI) const {
1329  return false;
1330  }
1331 
1332  /// Return the number of u-operations the given machine
1333  /// instruction will be decoded to on the target cpu. The itinerary's
1334  /// IssueWidth is the number of microops that can be dispatched each
1335  /// cycle. An instruction with zero microops takes no dispatch resources.
1336  virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
1337  const MachineInstr &MI) const;
1338 
1339  /// Return true for pseudo instructions that don't consume any
1340  /// machine resources in their current form. These are common cases that the
1341  /// scheduler should consider free, rather than conservatively handling them
1342  /// as instructions with no itinerary.
1343  bool isZeroCost(unsigned Opcode) const {
1344  return Opcode <= TargetOpcode::COPY;
1345  }
1346 
1347  virtual int getOperandLatency(const InstrItineraryData *ItinData,
1348  SDNode *DefNode, unsigned DefIdx,
1349  SDNode *UseNode, unsigned UseIdx) const;
1350 
1351  /// Compute and return the use operand latency of a given pair of def and use.
1352  /// In most cases, the static scheduling itinerary was enough to determine the
1353  /// operand latency. But it may not be possible for instructions with variable
1354  /// number of defs / uses.
1355  ///
1356  /// This is a raw interface to the itinerary that may be directly overridden
1357  /// by a target. Use computeOperandLatency to get the best estimate of
1358  /// latency.
1359  virtual int getOperandLatency(const InstrItineraryData *ItinData,
1360  const MachineInstr &DefMI, unsigned DefIdx,
1361  const MachineInstr &UseMI,
1362  unsigned UseIdx) const;
1363 
1364  /// Compute the instruction latency of a given instruction.
1365  /// If the instruction has higher cost when predicated, it's returned via
1366  /// PredCost.
1367  virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
1368  const MachineInstr &MI,
1369  unsigned *PredCost = nullptr) const;
1370 
1371  virtual unsigned getPredicationCost(const MachineInstr &MI) const;
1372 
1373  virtual int getInstrLatency(const InstrItineraryData *ItinData,
1374  SDNode *Node) const;
1375 
1376  /// Return the default expected latency for a def based on its opcode.
1377  unsigned defaultDefLatency(const MCSchedModel &SchedModel,
1378  const MachineInstr &DefMI) const;
1379 
1380  int computeDefOperandLatency(const InstrItineraryData *ItinData,
1381  const MachineInstr &DefMI) const;
1382 
1383  /// Return true if this opcode has high latency to its result.
1384  virtual bool isHighLatencyDef(int opc) const { return false; }
1385 
1386  /// Compute operand latency between a def of 'Reg'
1387  /// and a use in the current loop. Return true if the target considered
1388  /// it 'high'. This is used by optimization passes such as machine LICM to
1389  /// determine whether it makes sense to hoist an instruction out even in a
1390  /// high register pressure situation.
1391  virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
1392  const MachineRegisterInfo *MRI,
1393  const MachineInstr &DefMI, unsigned DefIdx,
1394  const MachineInstr &UseMI,
1395  unsigned UseIdx) const {
1396  return false;
1397  }
1398 
1399  /// Compute operand latency of a def of 'Reg'. Return true
1400  /// if the target considered it 'low'.
1401  virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel,
1402  const MachineInstr &DefMI,
1403  unsigned DefIdx) const;
1404 
1405  /// Perform target-specific instruction verification.
1406  virtual bool verifyInstruction(const MachineInstr &MI,
1407  StringRef &ErrInfo) const {
1408  return true;
1409  }
1410 
1411  /// Return the current execution domain and bit mask of
1412  /// possible domains for instruction.
1413  ///
1414  /// Some micro-architectures have multiple execution domains, and multiple
1415  /// opcodes that perform the same operation in different domains. For
1416  /// example, the x86 architecture provides the por, orps, and orpd
1417  /// instructions that all do the same thing. There is a latency penalty if a
1418  /// register is written in one domain and read in another.
1419  ///
1420  /// This function returns a pair (domain, mask) containing the execution
1421  /// domain of MI, and a bit mask of possible domains. The setExecutionDomain
1422  /// function can be used to change the opcode to one of the domains in the
1423  /// bit mask. Instructions whose execution domain can't be changed should
1424  /// return a 0 mask.
1425  ///
1426  /// The execution domain numbers don't have any special meaning except domain
1427  /// 0 is used for instructions that are not associated with any interesting
1428  /// execution domain.
1429  ///
1430  virtual std::pair<uint16_t, uint16_t>
1432  return std::make_pair(0, 0);
1433  }
1434 
1435  /// Change the opcode of MI to execute in Domain.
1436  ///
1437  /// The bit (1 << Domain) must be set in the mask returned from
1438  /// getExecutionDomain(MI).
1439  virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const {}
1440 
1441  /// Returns the preferred minimum clearance
1442  /// before an instruction with an unwanted partial register update.
1443  ///
1444  /// Some instructions only write part of a register, and implicitly need to
1445  /// read the other parts of the register. This may cause unwanted stalls
1446  /// preventing otherwise unrelated instructions from executing in parallel in
1447  /// an out-of-order CPU.
1448  ///
1449  /// For example, the x86 instruction cvtsi2ss writes its result to bits
1450  /// [31:0] of the destination xmm register. Bits [127:32] are unaffected, so
1451  /// the instruction needs to wait for the old value of the register to become
1452  /// available:
1453  ///
1454  /// addps %xmm1, %xmm0
1455  /// movaps %xmm0, (%rax)
1456  /// cvtsi2ss %rbx, %xmm0
1457  ///
1458  /// In the code above, the cvtsi2ss instruction needs to wait for the addps
1459  /// instruction before it can issue, even though the high bits of %xmm0
1460  /// probably aren't needed.
1461  ///
1462  /// This hook returns the preferred clearance before MI, measured in
1463  /// instructions. Other defs of MI's operand OpNum are avoided in the last N
1464  /// instructions before MI. It should only return a positive value for
1465  /// unwanted dependencies. If the old bits of the defined register have
1466  /// useful values, or if MI is determined to otherwise read the dependency,
1467  /// the hook should return 0.
1468  ///
1469  /// The unwanted dependency may be handled by:
1470  ///
1471  /// 1. Allocating the same register for an MI def and use. That makes the
1472  /// unwanted dependency identical to a required dependency.
1473  ///
1474  /// 2. Allocating a register for the def that has no defs in the previous N
1475  /// instructions.
1476  ///
1477  /// 3. Calling breakPartialRegDependency() with the same arguments. This
1478  /// allows the target to insert a dependency breaking instruction.
1479  ///
1480  virtual unsigned
1481  getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum,
1482  const TargetRegisterInfo *TRI) const {
1483  // The default implementation returns 0 for no partial register dependency.
1484  return 0;
1485  }
1486 
1487  /// Return the minimum clearance before an instruction that reads an
1488  /// unused register.
1489  ///
1490  /// For example, AVX instructions may copy part of a register operand into
1491  /// the unused high bits of the destination register.
1492  ///
1493  /// vcvtsi2sdq %rax, undef %xmm0, %xmm14
1494  ///
1495  /// In the code above, vcvtsi2sdq copies %xmm0[127:64] into %xmm14 creating a
1496  /// false dependence on any previous write to %xmm0.
1497  ///
1498  /// This hook works similarly to getPartialRegUpdateClearance, except that it
1499  /// does not take an operand index. Instead sets \p OpNum to the index of the
1500  /// unused register.
1501  virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum,
1502  const TargetRegisterInfo *TRI) const {
1503  // The default implementation returns 0 for no undef register dependency.
1504  return 0;
1505  }
1506 
1507  /// Insert a dependency-breaking instruction
1508  /// before MI to eliminate an unwanted dependency on OpNum.
1509  ///
1510  /// If it wasn't possible to avoid a def in the last N instructions before MI
1511  /// (see getPartialRegUpdateClearance), this hook will be called to break the
1512  /// unwanted dependency.
1513  ///
1514  /// On x86, an xorps instruction can be used as a dependency breaker:
1515  ///
1516  /// addps %xmm1, %xmm0
1517  /// movaps %xmm0, (%rax)
1518  /// xorps %xmm0, %xmm0
1519  /// cvtsi2ss %rbx, %xmm0
1520  ///
1521  /// An <imp-kill> operand should be added to MI if an instruction was
1522  /// inserted. This ties the instructions together in the post-ra scheduler.
1523  ///
1524  virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum,
1525  const TargetRegisterInfo *TRI) const {}
1526 
1527  /// Create machine specific model for scheduling.
1528  virtual DFAPacketizer *
1530  return nullptr;
1531  }
1532 
1533  /// Sometimes, it is possible for the target
1534  /// to tell, even without aliasing information, that two MIs access different
1535  /// memory addresses. This function returns true if two MIs access different
1536  /// memory addresses and false otherwise.
1537  ///
1538  /// Assumes any physical registers used to compute addresses have the same
1539  /// value for both instructions. (This is the most useful assumption for
1540  /// post-RA scheduling.)
1541  ///
1542  /// See also MachineInstr::mayAlias, which is implemented on top of this
1543  /// function.
1544  virtual bool
1546  AliasAnalysis *AA = nullptr) const {
1547  assert((MIa.mayLoad() || MIa.mayStore()) &&
1548  "MIa must load from or modify a memory location");
1549  assert((MIb.mayLoad() || MIb.mayStore()) &&
1550  "MIb must load from or modify a memory location");
1551  return false;
1552  }
1553 
1554  /// Return the value to use for the MachineCSE's LookAheadLimit,
1555  /// which is a heuristic used for CSE'ing phys reg defs.
1556  virtual unsigned getMachineCSELookAheadLimit() const {
1557  // The default lookahead is small to prevent unprofitable quadratic
1558  // behavior.
1559  return 5;
1560  }
1561 
1562  /// Return an array that contains the ids of the target indices (used for the
1563  /// TargetIndex machine operand) and their names.
1564  ///
1565  /// MIR Serialization is able to serialize only the target indices that are
1566  /// defined by this method.
1569  return None;
1570  }
1571 
1572  /// Decompose the machine operand's target flags into two values - the direct
1573  /// target flag value and any of bit flags that are applied.
1574  virtual std::pair<unsigned, unsigned>
1575  decomposeMachineOperandsTargetFlags(unsigned /*TF*/) const {
1576  return std::make_pair(0u, 0u);
1577  }
1578 
1579  /// Return an array that contains the direct target flag values and their
1580  /// names.
1581  ///
1582  /// MIR Serialization is able to serialize only the target flags that are
1583  /// defined by this method.
1586  return None;
1587  }
1588 
1589  /// Return an array that contains the bitmask target flag values and their
1590  /// names.
1591  ///
1592  /// MIR Serialization is able to serialize only the target flags that are
1593  /// defined by this method.
1596  return None;
1597  }
1598 
1599  /// Return an array that contains the MMO target flag values and their
1600  /// names.
1601  ///
1602  /// MIR Serialization is able to serialize only the MMO target flags that are
1603  /// defined by this method.
1606  return None;
1607  }
1608 
1609  /// Determines whether \p Inst is a tail call instruction. Override this
1610  /// method on targets that do not properly set MCID::Return and MCID::Call on
1611  /// tail call instructions."
1612  virtual bool isTailCall(const MachineInstr &Inst) const {
1613  return Inst.isReturn() && Inst.isCall();
1614  }
1615 
1616  /// True if the instruction is bound to the top of its basic block and no
1617  /// other instructions shall be inserted before it. This can be implemented
1618  /// to prevent register allocator to insert spills before such instructions.
1619  virtual bool isBasicBlockPrologue(const MachineInstr &MI) const {
1620  return false;
1621  }
1622 
1623  /// Returns a \p outliner::OutlinedFunction struct containing target-specific
1624  /// information for a set of outlining candidates.
1626  std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
1628  "Target didn't implement TargetInstrInfo::getOutliningCandidateInfo!");
1629  }
1630 
1631  /// Returns how or if \p MI should be outlined.
1632  virtual outliner::InstrType
1633  getOutliningType(MachineBasicBlock::iterator &MIT, unsigned Flags) const {
1635  "Target didn't implement TargetInstrInfo::getOutliningType!");
1636  }
1637 
1638  /// Optional target hook that returns true if \p MBB is safe to outline from,
1639  /// and returns any target-specific information in \p Flags.
1641  unsigned &Flags) const {
1642  return true;
1643  }
1644 
1645  /// Insert a custom frame for outlined functions.
1647  const outliner::OutlinedFunction &OF) const {
1649  "Target didn't implement TargetInstrInfo::buildOutlinedFrame!");
1650  }
1651 
1652  /// Insert a call to an outlined function into the program.
1653  /// Returns an iterator to the spot where we inserted the call. This must be
1654  /// implemented by the target.
1658  const outliner::Candidate &C) const {
1660  "Target didn't implement TargetInstrInfo::insertOutlinedCall!");
1661  }
1662 
1663  /// Return true if the function can safely be outlined from.
1664  /// A function \p MF is considered safe for outlining if an outlined function
1665  /// produced from instructions in F will produce a program which produces the
1666  /// same output for any set of given inputs.
1668  bool OutlineFromLinkOnceODRs) const {
1669  llvm_unreachable("Target didn't implement "
1670  "TargetInstrInfo::isFunctionSafeToOutlineFrom!");
1671  }
1672 
1673  /// Return true if the function should be outlined from by default.
1675  return false;
1676  }
1677 
1678 private:
1679  unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode;
1680  unsigned CatchRetOpcode;
1681  unsigned ReturnOpcode;
1682 };
1683 
1684 /// Provide DenseMapInfo for TargetInstrInfo::RegSubRegPair.
1687 
1689  return TargetInstrInfo::RegSubRegPair(RegInfo::getEmptyKey(),
1690  RegInfo::getEmptyKey());
1691  }
1692 
1694  return TargetInstrInfo::RegSubRegPair(RegInfo::getTombstoneKey(),
1695  RegInfo::getTombstoneKey());
1696  }
1697 
1698  /// Reuse getHashValue implementation from
1699  /// std::pair<unsigned, unsigned>.
1700  static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val) {
1701  std::pair<unsigned, unsigned> PairVal = std::make_pair(Val.Reg, Val.SubReg);
1702  return DenseMapInfo<std::pair<unsigned, unsigned>>::getHashValue(PairVal);
1703  }
1704 
1705  static bool isEqual(const TargetInstrInfo::RegSubRegPair &LHS,
1706  const TargetInstrInfo::RegSubRegPair &RHS) {
1707  return RegInfo::isEqual(LHS.Reg, RHS.Reg) &&
1708  RegInfo::isEqual(LHS.SubReg, RHS.SubReg);
1709  }
1710 };
1711 
1712 } // end namespace llvm
1713 
1714 #endif // LLVM_TARGET_TARGETINSTRINFO_H
virtual std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned) const
Decompose the machine operand&#39;s target flags into two values - the direct target flag value and any o...
uint64_t CallInst * C
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:636
virtual bool expandPostRAPseudo(MachineInstr &MI) const
This function is called for all pseudo instructions that remain after register allocation.
static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val)
Reuse getHashValue implementation from std::pair<unsigned, unsigned>.
virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1, int64_t &Offset2) const
This is used by the pre-regalloc scheduler to determine if two loads are loading from the same base a...
virtual bool canInsertSelect(const MachineBasicBlock &MBB, ArrayRef< MachineOperand > Cond, unsigned TrueReg, unsigned FalseReg, int &CondCycles, int &TrueCycles, int &FalseCycles) const
Return true if it is possible to insert a select instruction that chooses between TrueReg and FalseRe...
bool isCall(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:633
static TargetInstrInfo::RegSubRegPair getTombstoneKey()
virtual void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, unsigned DstReg, ArrayRef< MachineOperand > Cond, unsigned TrueReg, unsigned FalseReg) const
Insert a select instruction into MBB before I that will copy TrueReg to DstReg when Cond is true...
virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const
Return true if it&#39;s safe to move a machine instruction that defines the specified register class...
virtual bool isCopyInstrImpl(const MachineInstr &MI, const MachineOperand *&Source, const MachineOperand *&Destination) const
Target-dependent implemenation for IsCopyInstr.
This class represents lattice values for constants.
Definition: AllocatorList.h:24
MachineInstr * foldMemoryOperand(MachineInstr &MI, ArrayRef< unsigned > Ops, int FI, LiveIntervals *LIS=nullptr) const
Attempt to fold a load or store of the specified stack slot into the specified machine instruction fo...
RegSubRegPair(unsigned Reg=0, unsigned SubReg=0)
A Module instance is used to store all the information related to an LLVM module. ...
Definition: Module.h:65
virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const
Change the opcode of MI to execute in Domain.
virtual bool DefinesPredicate(MachineInstr &MI, std::vector< MachineOperand > &Pred) const
If the specified instruction defines any predicate or condition code register(s) used for predication...
virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg, unsigned SrcReg2, int Mask, int Value, const MachineRegisterInfo *MRI) const
See if the comparison instruction can be converted into something more efficient. ...
virtual bool isThroughputPattern(MachineCombinerPattern Pattern) const
Return true when a code sequence can improve throughput.
virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI, AliasAnalysis *AA) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
int64_t getFrameTotalSize(const MachineInstr &I) const
Returns the total frame size, which is made up of the space set up inside the pair of frame start-sto...
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:164
virtual bool analyzeSelect(const MachineInstr &MI, SmallVectorImpl< MachineOperand > &Cond, unsigned &TrueOp, unsigned &FalseOp, bool &Optimizable) const
Analyze the given select instruction, returning true if it cannot be understood.
virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const
If the instruction is an increment of a constant value, return the amount.
virtual MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore...
MachineInstr * commuteInstruction(MachineInstr &MI, bool NewMI=false, unsigned OpIdx1=CommuteAnyOperandIndex, unsigned OpIdx2=CommuteAnyOperandIndex) const
This method commutes the operands of the given machine instruction MI.
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when Inst has reassociable operands in the same MBB.
TargetInstrInfo(unsigned CFSetupOpcode=~0u, unsigned CFDestroyOpcode=~0u, unsigned CatchRetOpcode=~0u, unsigned ReturnOpcode=~0u)
virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const
Insert branch code into the end of the specified MachineBasicBlock.
virtual bool useMachineCombiner() const
Return true when a target supports MachineCombiner.
virtual bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Target-dependent implementation of getExtractSubregInputs.
unsigned const TargetRegisterInfo * TRI
A debug info location.
Definition: DebugLoc.h:34
unsigned getCallFrameDestroyOpcode() const
unsigned defaultDefLatency(const MCSchedModel &SchedModel, const MachineInstr &DefMI) const
Return the default expected latency for a def based on its opcode.
static bool isGenericOpcode(unsigned Opc)
virtual unsigned reduceLoopCount(MachineBasicBlock &MBB, MachineInstr *IndVar, MachineInstr &Cmp, SmallVectorImpl< MachineOperand > &Cond, SmallVectorImpl< MachineInstr *> &PrevInsts, unsigned Iter, unsigned MaxIter) const
Generate code to reduce the loop iteration by one and check if the loop is finished.
virtual unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const
Remove the branching code at the end of the specific MBB.
An individual sequence of instructions to be replaced with a call to an outlined function.
virtual bool isHighLatencyDef(int opc) const
Return true if this opcode has high latency to its result.
Represents a predicate at the MachineFunction level.
virtual bool getMemOperandWithOffset(MachineInstr &MI, MachineOperand *&BaseOp, int64_t &Offset, const TargetRegisterInfo *TRI) const
Get the base operand and byte offset of an instruction that reads/writes memory.
virtual bool isUnpredicatedTerminator(const MachineInstr &MI) const
Returns true if the instruction is a terminator instruction that has not been predicated.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:344
virtual unsigned getMachineCSELookAheadLimit() const
Return the value to use for the MachineCSE&#39;s LookAheadLimit, which is a heuristic used for CSE&#39;ing ph...
virtual unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex, unsigned &MemBytes) const
Optional extension of isLoadFromStackSlot that returns the number of bytes loaded from the stack...
virtual ArrayRef< std::pair< unsigned, const char * > > getSerializableBitmaskMachineOperandTargetFlags() const
Return an array that contains the bitmask target flag values and their names.
virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const
Returns the size in bytes of the specified MachineInstr, or ~0U when this function is not implemented...
virtual void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg, unsigned SubIdx, const MachineInstr &Orig, const TargetRegisterInfo &TRI) const
Re-issue the specified &#39;original&#39; instruction at the specific location targeting a new destination re...
virtual MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, const outliner::Candidate &C) const
Insert a call to an outlined function into the program.
virtual unsigned getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const
Returns the preferred minimum clearance before an instruction with an unwanted partial register updat...
Provide an instruction scheduling machine model to CodeGen passes.
const TargetRegisterClass * getRegClass(const MCInstrDesc &MCID, unsigned OpNum, const TargetRegisterInfo *TRI, const MachineFunction &MF) const
Given a machine instruction descriptor, returns the register class constraint for OpNum...
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI, LiveIntervals *LIS=nullptr) const
Target-dependent implementation for foldMemoryOperand.
virtual bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, unsigned Reg, MachineRegisterInfo *MRI) const
&#39;Reg&#39; is known to be defined by a move immediate instruction, try to fold the immediate into the use ...
virtual DFAPacketizer * CreateTargetScheduleState(const TargetSubtargetInfo &) const
Create machine specific model for scheduling.
virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, unsigned Reg, bool UnfoldLoad, bool UnfoldStore, SmallVectorImpl< MachineInstr *> &NewMIs) const
unfoldMemoryOperand - Separate a single instruction which folded a load or a store or a load and a st...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel, const MachineInstr &DefMI, unsigned DefIdx) const
Compute operand latency of a def of &#39;Reg&#39;.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:409
virtual MachineInstr * convertToThreeAddress(MachineFunction::iterator &MFI, MachineInstr &MI, LiveVariables *LV) const
This method must be implemented by targets that set the M_CONVERTIBLE_TO_3_ADDR flag.
virtual bool isBasicBlockPrologue(const MachineInstr &MI) const
True if the instruction is bound to the top of its basic block and no other instructions shall be ins...
unsigned getCatchReturnOpcode() const
bool isSelect() const
Return true if this is a select instruction.
Definition: MCInstrDesc.h:321
virtual ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const
Return an array that contains the direct target flag values and their names.
virtual void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, bool KillSrc) const
Emit instructions to copy a pair of physical registers.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
Definition: MachineInstr.h:406
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr) const
Target-dependent implementation for foldMemoryOperand.
static bool isEqual(const Function &Caller, const Function &Callee)
virtual unsigned getInstrLatency(const InstrItineraryData *ItinData, const MachineInstr &MI, unsigned *PredCost=nullptr) const
Compute the instruction latency of a given instruction.
virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, SmallVectorImpl< SDNode *> &NewNodes) const
bool isRematerializable() const
Returns true if this instruction is a candidate for remat.
Definition: MCInstrDesc.h:481
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< MachineCombinerPattern > &Patterns) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root...
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:33
virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, BranchProbability Probability) const
Return true if it&#39;s profitable to predicate instructions with accumulated instruction latency of "Num...
virtual unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct store to a stack slot, return the virtual or physica...
Itinerary data supplied by a subtarget to be used by a target.
virtual bool getBaseAndOffsetPosition(const MachineInstr &MI, unsigned &BasePos, unsigned &OffsetPos) const
Return true if the instruction contains a base register and offset.
virtual ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags() const
Return an array that contains the MMO target flag values and their names.
RegSubRegPairAndIdx(unsigned Reg=0, unsigned SubReg=0, unsigned SubIdx=0)
TargetInstrInfo & operator=(const TargetInstrInfo &)=delete
virtual MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr *> &NewMIs, bool PreferFalse=false) const
Given a select instruction that was understood by analyzeSelect and returned Optimizable = true...
virtual bool areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA=nullptr) const
Sometimes, it is possible for the target to tell, even without aliasing information, that two MIs access different memory addresses.
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:161
BasicBlockListType::iterator iterator
virtual bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const
Return true if the function can safely be outlined from.
TargetInstrInfo - Interface to description of machine instruction set.
bool isReturn(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:623
virtual void getNoop(MCInst &NopInst) const
Return the noop instruction to use for a noop.
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition: MCAsmInfo.h:56
virtual unsigned isStoreToStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const
Check for post-frame ptr elimination stack locations as well.
TargetInstrInfo::RegSubRegPair RegSubRegPair
virtual bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const
Return true if the function should be outlined from by default.
bool usePreRAHazardRecognizer() const
Provide a global flag for disabling the PreRA hazard recognizer that targets may choose to honor...
static const unsigned CommuteAnyOperandIndex
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
Definition: MachineInstr.h:820
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail, MachineBasicBlock *NewDest) const
Delete the instruction OldInst and everything after it, replacing it with an unconditional branch to ...
unsigned const MachineRegisterInfo * MRI
virtual bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const
virtual unsigned getPredicationCost(const MachineInstr &MI) const
HazardRecognizer - This determines whether or not an instruction can be issued this cycle...
InstrType
Represents how an instruction should be mapped by the outliner.
virtual bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Target-dependent implementation of getInsertSubregInputs.
unsigned insertUnconditionalBranch(MachineBasicBlock &MBB, MachineBasicBlock *DestBB, const DebugLoc &DL, int *BytesAdded=nullptr) const
MachineInstrBuilder & UseMI
virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const
Return true if it&#39;s legal to split the given basic block at the specified instruction (i...
virtual unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex, unsigned &MemBytes) const
Optional extension of isStoreToStackSlot that returns the number of bytes stored to the stack...
The information necessary to create an outlined function for some class of candidate.
virtual outliner::InstrType getOutliningType(MachineBasicBlock::iterator &MIT, unsigned Flags) const
Returns how or if MI should be outlined.
virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData, const MachineInstr &MI) const
Return the number of u-operations the given machine instruction will be decoded to on the target cpu...
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:24
Contains all data structures shared between the outliner implemented in MachineOutliner.cpp and target implementations of the outliner.
virtual bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const
Perform target-specific instruction verification.
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise)...
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specifi...
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const
Allocate and return a hazard recognizer to use for by non-scheduling passes.
bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const
Return true if the input Inst is part of a chain of dependent ops that are suitable for reassociatio...
virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, MachineInstr &NewMI1, MachineInstr &NewMI2) const
This is an architecture-specific helper function of reassociateOps.
A set of register units.
bool isPredicable() const
Return true if this instruction has a predicate operand that controls execution.
Definition: MCInstrDesc.h:308
virtual bool shouldSink(const MachineInstr &MI) const
Return true if the instruction should be sunk by MachineSink.
virtual unsigned getInlineAsmLength(const char *Str, const MCAsmInfo &MAI) const
Measure the specified inline asm to determine an approximation of its length.
MachineCombinerPattern
These are instruction patterns matched by the machine combiner pass.
virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx, unsigned &Size, unsigned &Offset, const MachineFunction &MF) const
Compute the size in bytes and offset within a stack slot of a spilled register or subregister...
bool isCopy() const
virtual bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const
Convert the instruction into a predicated instruction.
virtual bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const
Analyze the branching code at the end of MBB, returning true if it cannot be understood (e...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual bool canMakeTailCallConditional(SmallVectorImpl< MachineOperand > &Cond, const MachineInstr &TailCall) const
Returns true if the tail call can be made conditional on BranchCond.
virtual bool isPredicated(const MachineInstr &MI) const
Returns true if the instruction is already predicated.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
virtual bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg, unsigned &SrcReg2, int &Mask, int &Value) const
For a comparison instruction, return the source registers in SrcReg and SrcReg2 if having two registe...
virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex, int &SrcFrameIndex) const
Return true if the specified machine instruction is a copy of one stack slot to another and has no ot...
virtual bool isAsCheapAsAMove(const MachineInstr &MI) const
Return true if the instruction is as cheap as a move instruction.
virtual unsigned getAddressSpaceForPseudoSourceKind(unsigned Kind) const
getAddressSpaceForPseudoSourceKind - Given the kind of memory (e.g.
virtual bool hasLoadFromStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand *> &Accesses) const
If the specified machine instruction has a load from a stack slot, return true along with the FrameIn...
virtual bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const
Test if the given instruction should be considered a scheduling boundary.
virtual bool optimizeCondBranch(MachineInstr &MI) const
virtual void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const
Insert a custom frame for outlined functions.
void reassociateOps(MachineInstr &Root, MachineInstr &Prev, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr *> &InsInstrs, SmallVectorImpl< MachineInstr *> &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg) const
Attempt to reassociate Root and Prev according to Pattern to reduce critical path length...
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:222
MachineOperand class - Representation of each machine instruction operand.
A pair composed of a register and a sub-register index.
virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel, const MachineRegisterInfo *MRI, const MachineInstr &DefMI, unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const
Compute operand latency between a def of &#39;Reg&#39; and a use in the current loop.
MachineInstrBuilder MachineInstrBuilder & DefMI
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
Definition: PPCPredicates.h:27
bool isFrameInstr(const MachineInstr &I) const
Returns true if the argument is a frame pseudo instruction.
virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, BranchProbability Probability) const
Return true if it&#39;s profitable for if-converter to duplicate instructions of specified accumulated in...
virtual bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg, unsigned &DstReg, unsigned &SubIdx) const
Return true if the instruction is a "coalescable" extension instruction.
Represents one node in the SelectionDAG.
virtual unsigned insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, const DebugLoc &DL, int64_t BrOffset=0, RegScavenger *RS=nullptr) const
Insert an unconditional indirect branch at the end of MBB to NewDestBB.
virtual bool isTailCall(const MachineInstr &Inst) const
Determines whether Inst is a tail call instruction.
int64_t getFrameSize(const MachineInstr &I) const
Returns size of the frame associated with the given frame instruction.
int64_t getImm() const
bool getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Build the equivalent inputs of a REG_SEQUENCE for the given MI and DefIdx.
virtual bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction...
virtual bool isPredicable(const MachineInstr &MI) const
Return true if the specified instruction can be predicated.
unsigned getReturnOpcode() const
virtual void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
Store the specified register of the given register class to the specified stack frame index...
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const
Insert a noop into the instruction stream at the specified point.
Representation of each machine instruction.
Definition: MachineInstr.h:64
static TargetInstrInfo::RegSubRegPair getEmptyKey()
virtual ArrayRef< std::pair< int, const char * > > getSerializableTargetIndices() const
Return an array that contains the ids of the target indices (used for the TargetIndex machine operand...
virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst, MachineInstr *&CmpInst) const
Analyze the loop code, return true if it cannot be understoo.
static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2, unsigned CommutableOpIdx1, unsigned CommutableOpIdx2)
Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable operand indices to (ResultIdx1...
bool isCopyInstr(const MachineInstr &MI, const MachineOperand *&Source, const MachineOperand *&Destination) const
If the specific machine instruction is a instruction that moves/copies value from one register to ano...
virtual bool hasStoreToStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand *> &Accesses) const
If the specified machine instruction has a store to a stack slot, return true along with the FrameInd...
virtual bool SubsumesPredicate(ArrayRef< MachineOperand > Pred1, ArrayRef< MachineOperand > Pred2) const
Returns true if the first specified predicate subsumes the second, e.g.
virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const
Insert a dependency-breaking instruction before MI to eliminate an unwanted dependency on OpNum...
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const
Check for post-frame ptr elimination stack locations as well.
virtual bool analyzeBranchPredicate(MachineBasicBlock &MBB, MachineBranchPredicate &MBP, bool AllowModify=false) const
Analyze the branching code at the end of MBB and parse it into the MachineBranchPredicate structure i...
virtual void genAlternativeCodeSequence(MachineInstr &Root, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr *> &InsInstrs, SmallVectorImpl< MachineInstr *> &DelInstrs, DenseMap< unsigned, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct load from a stack slot, return the virtual or physic...
static MachineOperand CreateImm(int64_t Val)
#define I(x, y, z)
Definition: MD5.cpp:58
#define N
virtual bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo *MRI=nullptr) const
Return true if two machine instructions would produce identical values.
bool isZeroCost(unsigned Opcode) const
Return true for pseudo instructions that don&#39;t consume any machine resources in their current form...
virtual std::pair< uint16_t, uint16_t > getExecutionDomain(const MachineInstr &MI) const
Return the current execution domain and bit mask of possible domains for instruction.
uint32_t Size
Definition: Profile.cpp:47
virtual MachineInstr * optimizeLoadInstr(MachineInstr &MI, const MachineRegisterInfo *MRI, unsigned &FoldAsLoadDefReg, MachineInstr *&DefMI) const
Try to remove the load by folding it to a register operand at the use.
const unsigned Kind
virtual bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const
Reverses the branch condition of the specified condition list, returning false on success and true if...
virtual bool isSubregFoldable() const
Check whether the target can fold a load that feeds a subreg operand (or a subreg operand that feeds ...
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
Definition: MachineInstr.h:807
virtual bool shouldClusterMemOps(MachineOperand &BaseOp1, MachineOperand &BaseOp2, unsigned NumLoads) const
Returns true if the two given memory operations should be scheduled adjacent.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
bool isFrameSetup(const MachineInstr &I) const
Returns true if the argument is a frame setup pseudo instruction.
virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTCycles, unsigned ExtraTCycles, MachineBasicBlock &FMBB, unsigned NumFCycles, unsigned ExtraFCycles, BranchProbability Probability) const
Second variant of isProfitableToIfCvt.
LLVM Value Representation.
Definition: Value.h:73
virtual MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const
bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when Inst has reassociable sibling.
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual bool isAssociativeAndCommutative(const MachineInstr &Inst) const
Return true when Inst is both associative and commutative.
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E&#39;s largest value.
Definition: BitmaskEnum.h:81
virtual void replaceBranchWithTailCall(MachineBasicBlock &MBB, SmallVectorImpl< MachineOperand > &Cond, const MachineInstr &TailCall) const
Replace the conditional branch in MBB with a conditional tail call.
IRTranslator LLVM IR MI
virtual bool isPostIncrement(const MachineInstr &MI) const
Return true for post-incremented instructions.
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:49
virtual bool isUnconditionalTailCall(const MachineInstr &MI) const
Returns true if MI is an unconditional tail call.
virtual bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Target-dependent implementation of getRegSequenceInputs.
bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Build the equivalent inputs of a EXTRACT_SUBREG for the given MI and DefIdx.
virtual bool canCopyGluedNodeDuringSchedule(SDNode *N) const
Return true if the given SDNode can be copied during scheduling even if it has glue.
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:414
bool isTriviallyReMaterializable(const MachineInstr &MI, AliasAnalysis *AA=nullptr) const
Return true if the instruction is trivially rematerializable, meaning it has no side effects and requ...
virtual outliner::OutlinedFunction getOutliningCandidateInfo(std::vector< outliner::Candidate > &RepeatedSequenceLocs) const
Returns a outliner::OutlinedFunction struct containing target-specific information for a set of outli...
Machine model for scheduling, bundling, and heuristics.
Definition: MCSchedule.h:244
static bool isEqual(const TargetInstrInfo::RegSubRegPair &LHS, const TargetInstrInfo::RegSubRegPair &RHS)
bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Build the equivalent inputs of a INSERT_SUBREG for the given MI and DefIdx.
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB, MachineBasicBlock &FMBB) const
Return true if it&#39;s profitable to unpredicate one side of a &#39;diamond&#39;, i.e.
int computeDefOperandLatency(const InstrItineraryData *ItinData, const MachineInstr &DefMI) const
If we can determine the operand latency from the def only, without itinerary lookup, do so.
virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore, unsigned *LoadRegIndex=nullptr) const
Returns the opcode of the would be new instruction after load / store are unfolded from an instructio...
virtual int getOperandLatency(const InstrItineraryData *ItinData, SDNode *DefNode, unsigned DefIdx, SDNode *UseNode, unsigned UseIdx) const
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
Load the specified register of the given register class from the specified stack frame index...
bool isAsCheapAsAMove(QueryType Type=AllInBundle) const
Returns true if this instruction has the same cost (or less) than a move instruction.
Definition: MachineInstr.h:906
virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1, int64_t Offset2, unsigned NumLoads) const
This is a used by the pre-regalloc scheduler to determine (in conjunction with areLoadsFromSameBasePt...
virtual int getSPAdjust(const MachineInstr &MI) const
Returns the actual stack pointer adjustment made by an instruction as part of a call sequence...
virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum, const TargetRegisterInfo *TRI) const
Return the minimum clearance before an instruction that reads an unused register. ...
A pair composed of a pair of a register and a sub-register index, and another sub-register index...