1 //===-- PPCISelLowering.h - PPC32 DAG Lowering Interface --------*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines the interfaces that PPC uses to lower LLVM code into a
11 // selection DAG.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #ifndef LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
16 #define LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
17 
18 #include "PPC.h"
19 #include "PPCInstrInfo.h"
27 #include "llvm/IR/Attributes.h"
28 #include "llvm/IR/CallingConv.h"
29 #include "llvm/IR/Function.h"
30 #include "llvm/IR/InlineAsm.h"
31 #include "llvm/IR/Metadata.h"
32 #include "llvm/IR/Type.h"
34 #include <utility>
35 
36 namespace llvm {
37 
38  namespace PPCISD {
39 
40  // When adding a NEW PPCISD node please add it to the correct position in
41  // the enum. The order of elements in this enum matters!
42  // Values that are added after this entry:
43  // STBRX = ISD::FIRST_TARGET_MEMORY_OPCODE
44  // are considered memory opcodes and are treated differently than entries
45  // that come before it. For example, ADD or MUL should be placed before
46  // the ISD::FIRST_TARGET_MEMORY_OPCODE while a LOAD or STORE should come
47  // after it.
48  enum NodeType : unsigned {
49  // Start the numbering where the builtin ops and target ops leave off.
51 
52  /// FSEL - Traditional three-operand fsel node.
53  ///
55 
56  /// FCFID - The FCFID instruction, taking an f64 operand and producing
57  /// an f64 value containing the FP representation of the integer that
58  /// was temporarily in the f64 operand.
60 
61  /// Newer FCFID[US] integer-to-floating-point conversion instructions for
62  /// unsigned integers and single-precision outputs.
64 
65  /// FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64
66  /// operand, producing an f64 value containing the integer representation
67  /// of that FP value.
69 
70  /// Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions for
71  /// unsigned integers with round toward zero.
73 
74  /// Floating-point-to-integer conversion instructions
76 
77  /// VEXTS, ByteWidth - takes an input in VSFRC and produces an output in
78  /// VSFRC that is sign-extended from ByteWidth to a 64-bit integer.
80 
81  /// SExtVElems, takes an input vector of a smaller type and sign
82  /// extends to an output vector of a larger type.
84 
85  /// Reciprocal estimate instructions (unary FP ops).
87 
88  // VMADDFP, VNMSUBFP - The VMADDFP and VNMSUBFP instructions, taking
89  // three v4f32 operands and producing a v4f32 result.
91 
92  /// VPERM - The PPC VPERM Instruction.
93  ///
95 
96  /// XXSPLT - The PPC VSX splat instructions
97  ///
99 
100  /// VECINSERT - The PPC vector insert instruction
101  ///
103 
104  /// XXREVERSE - The PPC VSX reverse instruction
105  ///
107 
108  /// VECSHL - The PPC vector shift left instruction
109  ///
111 
112  /// XXPERMDI - The PPC XXPERMDI instruction
113  ///
115 
116  /// The CMPB instruction (takes two operands of i32 or i64).
118 
119  /// Hi/Lo - These represent the high and low 16-bit parts of a global
120  /// address respectively. These nodes have two operands, the first of
121  /// which must be a TargetGlobalAddress, and the second of which must be a
122  /// Constant. Selected naively, these turn into 'lis G+C' and 'li G+C',
123  /// though these are usually folded into other nodes.
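 ///
 /// For example (an illustrative sketch, not the exact selected output), the
 /// address of a global `g` can be formed as
 ///   (add (PPCISD::Hi tglobaladdr:g, 0), (PPCISD::Lo tglobaladdr:g, 0))
 /// which typically becomes `lis r3, g@ha` with the Lo part folded into the
 /// user, e.g. `addi r3, r3, g@l` or the displacement of a D-form load/store.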
124  Hi, Lo,
125 
126  /// The following two target-specific nodes are used for calls through
127  /// function pointers in the 64-bit SVR4 ABI.
128 
129  /// OPRC, CHAIN = DYNALLOC(CHAIN, NEGSIZE, FRAME_INDEX)
130  /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
131  /// compute an allocation on the stack.
133 
134  /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
135  /// compute an offset from native SP to the address of the most recent
136  /// dynamic alloca.
138 
139  /// GlobalBaseReg - On Darwin, this node represents the result of the mflr
140  /// at function entry, used for PIC code.
142 
143  /// These nodes represent PPC shifts.
144  ///
145  /// For scalar types, only the last `n + 1` bits of the shift amounts
146  /// are used, where n is log2(sizeof(element) * 8). See sld/slw, etc.
147  /// for exact behaviors.
148  ///
149  /// For vector types, only the last n bits are used. See vsld.
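 ///
 /// For example (illustrative), for an i32 shift n == 5, so slw consults only
 /// the low 6 bits of the shift amount and any amount in the range [32, 63]
 /// produces zero.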
151 
152  /// EXTSWSLI = The PPC extswsli instruction, which does an extend-sign
153  /// word and shift left immediate.
155 
156  /// The combination of sra[wd]i and addze used to implement signed
157  /// integer division by a power of 2. The first operand is the dividend,
158  /// and the second is the constant shift amount (representing the
159  /// divisor).
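 ///
 /// For example (an illustrative sketch of the usual sequence), an i32 signed
 /// division by 4 becomes:
 ///   srawi r3, r3, 2   (CA is set if the dividend is negative and bits shift out)
 ///   addze r3, r3      (add the carry back in, rounding the quotient toward zero)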
161 
162  /// CALL - A direct function call.
163  /// CALL_NOP is a call with the special NOP which follows 64-bit
164  /// SVR4 calls.
166 
167  /// CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a
168  /// MTCTR instruction.
170 
171  /// CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a
172  /// BCTRL instruction.
174 
175  /// CHAIN,FLAG = BCTRL(CHAIN, ADDR, INFLAG) - The combination of a bctrl
176  /// instruction and the TOC reload required on SVR4 PPC64.
178 
179  /// Return with a flag operand, matched by 'blr'
181 
182  /// R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction.
183  /// This copies the bits corresponding to the specified CRREG into the
184  /// resultant GPR. Bits corresponding to other CR regs are undefined.
186 
187  /// Direct move from a VSX register to a GPR
189 
190  /// Direct move from a GPR to a VSX register (algebraic)
192 
193  /// Direct move from a GPR to a VSX register (zero)
195 
196  /// Direct move of two consecutive GPRs to a VSX register.
198 
199  /// Extract a subvector from signed integer vector and convert to FP.
200  /// It is primarily used to convert a (widened) illegal integer vector
201  /// type to a legal floating point vector type.
202  /// For example v2i32 -> widened to v4i32 -> v2f64
204 
205  /// Extract a subvector from unsigned integer vector and convert to FP.
206  /// As with SINT_VEC_TO_FP, used for converting illegal types.
208 
209  // FIXME: Remove these once the ANDI glue bug is fixed:
210  /// i1 = ANDIo_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the
211  /// eq or gt bit of CR0 after executing andi. x, 1. This is used to
212  /// implement truncation of i32 or i64 to i1.
214 
215  // READ_TIME_BASE - A read of the 64-bit time-base register on a 32-bit
216  // target (returns (Lo, Hi)). It takes a chain operand.
218 
219  // EH_SJLJ_SETJMP - SjLj exception handling setjmp.
221 
222  // EH_SJLJ_LONGJMP - SjLj exception handling longjmp.
224 
225  /// RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP*
226  /// instructions. For lack of a better number, we use the opcode number
227  /// encoding for the OPC field to identify the compare. For example, 838
228  /// is VCMPGTSH.
230 
231  /// RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the
232  /// altivec VCMP*o instructions. For lack of a better number, we use the
233  /// opcode number encoding for the OPC field to identify the compare. For
234  /// example, 838 is VCMPGTSH.
236 
237  /// CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This
238  /// corresponds to the COND_BRANCH pseudo instruction. CRRC is the
239  /// condition register to branch on, OPC is the branch opcode to use (e.g.
240  /// PPC::BLE), DESTBB is the destination block to branch to, and INFLAG is
241  /// an optional input flag argument.
243 
244  /// CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based
245  /// loops.
247 
248  /// F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding
249  /// towards zero. Used only as part of the long double-to-int
250  /// conversion sequence.
252 
253  /// F8RC = MFFS - This moves the FPSCR (not modeled) into the register.
255 
256  /// TC_RETURN - A tail call return.
257  /// operand #0 chain
258  /// operand #1 callee (register or absolute)
259  /// operand #2 stack adjustment
260  /// operand #3 optional in flag
262 
263  /// ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls
266 
267  /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by initial-exec TLS
268  /// on PPC32.
270 
271  /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by general dynamic and
272  /// local dynamic TLS on PPC32.
274 
275  /// G8RC = ADDIS_GOT_TPREL_HA %x2, Symbol - Used by the initial-exec
276  /// TLS model, produces an ADDIS8 instruction that adds the GOT
277  /// base to sym\@got\@tprel\@ha.
279 
280  /// G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec
281  /// TLS model, produces a LD instruction with base register G8RReg
282  /// and offset sym\@got\@tprel\@l. This completes the addition that
283  /// finds the offset of "sym" relative to the thread pointer.
285 
286  /// G8RC = ADD_TLS G8RReg, Symbol - Used by the initial-exec TLS
287  /// model, produces an ADD instruction that adds the contents of
288  /// G8RReg to the thread pointer. Symbol contains a relocation
289  /// sym\@tls which is to be replaced by the thread pointer and
290  /// identifies to the linker that the instruction is part of a
291  /// TLS sequence.
293 
294  /// G8RC = ADDIS_TLSGD_HA %x2, Symbol - For the general-dynamic TLS
295  /// model, produces an ADDIS8 instruction that adds the GOT base
296  /// register to sym\@got\@tlsgd\@ha.
298 
299  /// %x3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS
300  /// model, produces an ADDI8 instruction that adds G8RReg to
301  /// sym\@got\@tlsgd\@l and stores the result in X3. Hidden by
302  /// ADDIS_TLSGD_L_ADDR until after register assignment.
304 
305  /// %x3 = GET_TLS_ADDR %x3, Symbol - For the general-dynamic TLS
306  /// model, produces a call to __tls_get_addr(sym\@tlsgd). Hidden by
307  /// ADDIS_TLSGD_L_ADDR until after register assignment.
309 
310  /// G8RC = ADDI_TLSGD_L_ADDR G8RReg, Symbol, Symbol - Op that
311  /// combines ADDI_TLSGD_L and GET_TLS_ADDR until expansion following
312  /// register assignment.
314 
315  /// G8RC = ADDIS_TLSLD_HA %x2, Symbol - For the local-dynamic TLS
316  /// model, produces an ADDIS8 instruction that adds the GOT base
317  /// register to sym\@got\@tlsld\@ha.
319 
320  /// %x3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS
321  /// model, produces an ADDI8 instruction that adds G8RReg to
322  /// sym\@got\@tlsld\@l and stores the result in X3. Hidden by
323  /// ADDIS_TLSLD_L_ADDR until after register assignment.
325 
326  /// %x3 = GET_TLSLD_ADDR %x3, Symbol - For the local-dynamic TLS
327  /// model, produces a call to __tls_get_addr(sym\@tlsld). Hidden by
328  /// ADDIS_TLSLD_L_ADDR until after register assignment.
330 
331  /// G8RC = ADDI_TLSLD_L_ADDR G8RReg, Symbol, Symbol - Op that
332  /// combines ADDI_TLSLD_L and GET_TLSLD_ADDR until expansion
333  /// following register assignment.
335 
336  /// G8RC = ADDIS_DTPREL_HA %x3, Symbol - For the local-dynamic TLS
337  /// model, produces an ADDIS8 instruction that adds X3 to
338  /// sym\@dtprel\@ha.
340 
341  /// G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS
342  /// model, produces an ADDI8 instruction that adds G8RReg to
343  /// sym\@got\@dtprel\@l.
345 
346  /// VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded
347  /// during instruction selection to optimize a BUILD_VECTOR into
348  /// operations on splats. This is necessary to avoid losing these
349  /// optimizations due to constant folding.
351 
352  /// CHAIN = SC CHAIN, Imm128 - System call. The 7-bit unsigned
353  /// operand identifies the operating system entry point.
354  SC,
355 
356  /// CHAIN = CLRBHRB CHAIN - Clear branch history rolling buffer.
358 
359  /// GPRC, CHAIN = MFBHRBE CHAIN, Entry, Dummy - Move from branch
360  /// history rolling buffer entry.
362 
363  /// CHAIN = RFEBB CHAIN, State - Return from event-based branch.
365 
366  /// VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little
367  /// endian. Maps to an xxswapd instruction that corrects an lxvd2x
368  /// or stxvd2x instruction. The chain is necessary because the
369  /// sequence replaces a load and needs to provide the same number
370  /// of outputs.
372 
373  /// An SDNode for swaps that are not associated with any loads/stores
374  /// and thereby have no chain.
376 
377  /// An SDNode for Power9 vector absolute value difference.
378  /// operand #0 vector
379  /// operand #1 vector
380  /// operand #2 constant i32 0 or 1, indicating whether the most significant
381  /// bit needs to be patched for signed i32
382  ///
383  /// The Power9 VABSD* instructions are designed for unsigned integer
384  /// vectors (byte/halfword/word); to use them for signed integer vectors, we
385  /// have to flip the sign bits first. Flipping the sign bits of a
386  /// byte/halfword integer vector is inefficient, but for a word integer
387  /// vector we can leverage XVNEGSP to do it efficiently, e.g.:
388  /// abs(sub(a,b)) => VABSDUW(a+0x80000000, b+0x80000000)
389  ///              => VABSDUW((XVNEGSP a), (XVNEGSP b))
391 
392  /// QVFPERM = This corresponds to the QPX qvfperm instruction.
394 
395  /// QVGPCI = This corresponds to the QPX qvgpci instruction.
397 
398  /// QVALIGNI = This corresponds to the QPX qvaligni instruction.
400 
401  /// QVESPLATI = This corresponds to the QPX qvesplati instruction.
403 
404  /// QBFLT = Access the underlying QPX floating-point boolean
405  /// representation.
407 
408  /// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a
409  /// byte-swapping store instruction. It byte-swaps the low "Type" bits of
410  /// the GPRC input, then stores it through Ptr. Type can be either i16 or
411  /// i32.
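 ///
 /// For example (illustrative), an i32 byte-swapped store selects to
 /// `stwbrx %val, 0, %ptr` and the i16 form to `sthbrx`.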
413 
414  /// GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a
415  /// byte-swapping load instruction. It loads "Type" bits, byte swaps it,
416  /// then puts it in the bottom bits of the GPRC. TYPE can be either i16
417  /// or i32.
419 
420  /// STFIWX - The STFIWX instruction. The first operand is an input token
421  /// chain, then an f64 value to store, then an address to store it to.
423 
424  /// GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point
425  /// load which sign-extends from a 32-bit integer value into the
426  /// destination 64-bit register.
428 
429  /// GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point
430  /// load which zero-extends from a 32-bit integer value into the
431  /// destination 64-bit register.
433 
434  /// GPRC, CHAIN = LXSIZX, CHAIN, Ptr, ByteWidth - This is a load of an
435  /// integer smaller than 64 bits into a VSR. The integer is zero-extended.
436  /// This can be used for converting loaded integers to floating point.
438 
439  /// STXSIX - The STXSI[bh]X instruction. The first operand is an input
440  /// chain, then an f64 value to store, then an address to store it to,
441  /// followed by a byte-width for the store.
443 
444  /// VSRC, CHAIN = LXVD2X_LE CHAIN, Ptr - Occurs only for little endian.
445  /// Maps directly to an lxvd2x instruction that will be followed by
446  /// an xxswapd.
448 
449  /// CHAIN = STXVD2X CHAIN, VSRC, Ptr - Occurs only for little endian.
450  /// Maps directly to an stxvd2x instruction that will be preceded by
451  /// an xxswapd.
453 
454  /// Store scalar integers from VSR.
456 
457  /// QBRC, CHAIN = QVLFSb CHAIN, Ptr
458  /// The 4xf32 load used for v4i1 constants.
460 
461  /// ATOMIC_CMP_SWAP - the exact same as the target-independent nodes
462  /// except they ensure that the compare input is zero-extended for
463  /// sub-word versions because the atomic loads zero-extend.
465 
466  /// GPRC = TOC_ENTRY GA, TOC
467  /// Loads the entry for GA from the TOC, where the TOC base is given by
468  /// the last operand.
470  };
471 
472  } // end namespace PPCISD
473 
474  /// Define some predicates that are used for node matching.
475  namespace PPC {
476 
477  /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
478  /// VPKUHUM instruction.
479  bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
480  SelectionDAG &DAG);
481 
482  /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
483  /// VPKUWUM instruction.
484  bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
485  SelectionDAG &DAG);
486 
487  /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
488  /// VPKUDUM instruction.
489  bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
490  SelectionDAG &DAG);
491 
492  /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
493  /// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
494  bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
495  unsigned ShuffleKind, SelectionDAG &DAG);
496 
497  /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
498  /// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
499  bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
500  unsigned ShuffleKind, SelectionDAG &DAG);
501 
502  /// isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for
503  /// a VMRGEW or VMRGOW instruction
504  bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
505  unsigned ShuffleKind, SelectionDAG &DAG);
506  /// isXXSLDWIShuffleMask - Return true if this is a shuffle mask suitable
507  /// for a XXSLDWI instruction.
508  bool isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
509  bool &Swap, bool IsLE);
510 
511  /// isXXBRHShuffleMask - Return true if this is a shuffle mask suitable
512  /// for a XXBRH instruction.
513  bool isXXBRHShuffleMask(ShuffleVectorSDNode *N);
514
515  /// isXXBRWShuffleMask - Return true if this is a shuffle mask suitable
516  /// for a XXBRW instruction.
517  bool isXXBRWShuffleMask(ShuffleVectorSDNode *N);
518
519  /// isXXBRDShuffleMask - Return true if this is a shuffle mask suitable
520  /// for a XXBRD instruction.
521  bool isXXBRDShuffleMask(ShuffleVectorSDNode *N);
522
523  /// isXXBRQShuffleMask - Return true if this is a shuffle mask suitable
524  /// for a XXBRQ instruction.
525  bool isXXBRQShuffleMask(ShuffleVectorSDNode *N);
526
527  /// isXXPERMDIShuffleMask - Return true if this is a shuffle mask suitable
528  /// for a XXPERMDI instruction.
529  bool isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
530  bool &Swap, bool IsLE);
531 
532  /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the
533  /// shift amount, otherwise return -1.
534  int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
535  SelectionDAG &DAG);
536 
537  /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
538  /// specifies a splat of a single element that is suitable for input to
539  /// VSPLTB/VSPLTH/VSPLTW.
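 ///
 /// For example (illustrative), a v16i8 shuffle whose mask is sixteen copies
 /// of the index 5 is such a splat with EltSize == 1 and can be selected as
 /// `vspltb v2, v2, 5`.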
540  bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);
541 
542  /// isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled by
543  /// the XXINSERTW instruction introduced in ISA 3.0. This is essentially any
544  /// shuffle of v4f32/v4i32 vectors that just inserts one element from one
545  /// vector into the other. This function will also set a couple of
546  /// output parameters for how much the source vector needs to be shifted and
547  /// what byte number needs to be specified for the instruction to put the
548  /// element in the desired location of the target vector.
549  bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
550  unsigned &InsertAtByte, bool &Swap, bool IsLE);
551 
552  /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
553  /// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
554  unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize, SelectionDAG &DAG);
555 
556  /// get_VSPLTI_elt - If this is a build_vector of constants which can be
557  /// formed by using a vspltis[bhw] instruction of the specified element
558  /// size, return the constant being splatted. The ByteSize field indicates
559  /// the number of bytes of each element [124] -> [bhw].
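 ///
 /// For example (illustrative), a v16i8 build_vector of sixteen copies of the
 /// constant 5 matches with ByteSize == 1 and can be selected as
 /// `vspltisb v2, 5`; constants outside the signed 5-bit immediate range do
 /// not match.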
560  SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
561 
562  /// If this is a qvaligni shuffle mask, return the shift
563  /// amount, otherwise return -1.
565 
566  } // end namespace PPC
567 
568  class PPCTargetLowering : public TargetLowering {
569  const PPCSubtarget &Subtarget;
570 
571  public:
572  explicit PPCTargetLowering(const PPCTargetMachine &TM,
573  const PPCSubtarget &STI);
574 
575  /// getTargetNodeName() - This method returns the name of a target specific
576  /// DAG node.
577  const char *getTargetNodeName(unsigned Opcode) const override;
578 
579  bool isSelectSupported(SelectSupportKind Kind) const override {
580  // PowerPC does not support scalar condition selects on vectors.
581  return (Kind != SelectSupportKind::ScalarCondVectorVal);
582  }
583 
584  /// getPreferredVectorAction - The code we generate when vector types are
585  /// legalized by promoting the integer element type is often much worse
586  /// than code we generate if we widen the type for applicable vector types.
587  /// The issue with promoting is that the vector is scalarized, individual
588  /// elements promoted and then the vector is rebuilt. So say we load a pair
589  /// of v4i8's and shuffle them. This will turn into a mess of 8 extending
590  /// loads, moves back into VSR's (or memory ops if we don't have moves) and
591  /// then the VPERM for the shuffle. All in all a very slow sequence.
592  TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT)
593  const override {
594  if (VT.getScalarSizeInBits() % 8 == 0)
595  return TypeWidenVector;
596  return TargetLoweringBase::getPreferredVectorAction(VT);
597  }
598 
599  bool useSoftFloat() const override;
600 
601  bool hasSPE() const;
602 
603  MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
604  return MVT::i32;
605  }
606 
607  bool isCheapToSpeculateCttz() const override {
608  return true;
609  }
610 
611  bool isCheapToSpeculateCtlz() const override {
612  return true;
613  }
614 
615  bool isCtlzFast() const override {
616  return true;
617  }
618 
619  bool hasAndNotCompare(SDValue) const override {
620  return true;
621  }
622 
623  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
624  return VT.isScalarInteger();
625  }
626 
627  bool supportSplitCSR(MachineFunction *MF) const override {
628  return
631  }
632 
633  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
634 
635  void insertCopiesSplitCSR(
636  MachineBasicBlock *Entry,
637  const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;
638 
639  /// getSetCCResultType - Return the ISD::SETCC ValueType
640  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
641  EVT VT) const override;
642 
643  /// Return true if the target always benefits from combining into FMA for a
644  /// given value type. This must typically return false on targets where FMA
645  /// takes more cycles to execute than FADD.
646  bool enableAggressiveFMAFusion(EVT VT) const override;
647 
648  /// getPreIndexedAddressParts - Returns true, and sets the base pointer,
649  /// offset pointer and addressing mode by reference, if the node's address
650  /// can be legally represented as a pre-indexed load/store address.
651  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
652  SDValue &Offset,
653  ISD::MemIndexedMode &AM,
654  SelectionDAG &DAG) const override;
655 
656  /// SelectAddressRegReg - Given the specified address, check to see if it
657  /// can be represented as an indexed [r+r] operation. Returns false if it
658  /// can be more efficiently represented with [r+imm].
659  bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index,
660  SelectionDAG &DAG) const;
661 
662  /// SelectAddressRegImm - Returns true if the address N can be represented
663  /// by a base register plus a signed 16-bit displacement [r+imm], and if it
664  /// is not better represented as reg+reg. If Alignment is non-zero, only
665  /// accept displacements that are multiples of that value, e.g. 4 for STD and friends.
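 ///
 /// For example (illustrative), with Alignment == 4 an address of the form
 /// (add %base, 12) is accepted as the displacement in `std %val, 12(%base)`,
 /// while (add %base, 13) is rejected because DS-form displacements must be
 /// multiples of 4.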
666  bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base,
667  SelectionDAG &DAG, unsigned Alignment) const;
668 
669  /// SelectAddressRegRegOnly - Given the specified address, force it to be
670  /// represented as an indexed [r+r] operation.
671  bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index,
672  SelectionDAG &DAG) const;
673 
674  Sched::Preference getSchedulingPreference(SDNode *N) const override;
675 
676  /// LowerOperation - Provide custom lowering hooks for some operations.
677  ///
678  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
679 
680  /// ReplaceNodeResults - Replace the results of node with an illegal result
681  /// type with new values built out of custom code.
682  ///
683  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
684  SelectionDAG &DAG) const override;
685 
686  SDValue expandVSXLoadForLE(SDNode *N, DAGCombinerInfo &DCI) const;
687  SDValue expandVSXStoreForLE(SDNode *N, DAGCombinerInfo &DCI) const;
688 
689  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
690 
691  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
692  SmallVectorImpl<SDNode *> &Created) const override;
693 
694  unsigned getRegisterByName(const char* RegName, EVT VT,
695  SelectionDAG &DAG) const override;
696 
697  void computeKnownBitsForTargetNode(const SDValue Op,
698  KnownBits &Known,
699  const APInt &DemandedElts,
700  const SelectionDAG &DAG,
701  unsigned Depth = 0) const override;
702 
703  unsigned getPrefLoopAlignment(MachineLoop *ML) const override;
704 
705  bool shouldInsertFencesForAtomic(const Instruction *I) const override {
706  return true;
707  }
708 
709  Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
710  AtomicOrdering Ord) const override;
711  Instruction *emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst,
712  AtomicOrdering Ord) const override;
713 
714  MachineBasicBlock *
715  EmitInstrWithCustomInserter(MachineInstr &MI,
716  MachineBasicBlock *MBB) const override;
717  MachineBasicBlock *EmitAtomicBinary(MachineInstr &MI,
718  MachineBasicBlock *MBB,
719  unsigned AtomicSize,
720  unsigned BinOpcode,
721  unsigned CmpOpcode = 0,
722  unsigned CmpPred = 0) const;
723  MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr &MI,
724  MachineBasicBlock *MBB,
725  bool is8bit,
726  unsigned Opcode,
727  unsigned CmpOpcode = 0,
728  unsigned CmpPred = 0) const;
729 
730  MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
731  MachineBasicBlock *MBB) const;
732 
733  MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
734  MachineBasicBlock *MBB) const;
735 
736  ConstraintType getConstraintType(StringRef Constraint) const override;
737 
738  /// Examine constraint string and operand type and determine a weight value.
739  /// The operand object must already have been set up with the operand type.
740  ConstraintWeight getSingleConstraintMatchWeight(
741  AsmOperandInfo &info, const char *constraint) const override;
742 
743  std::pair<unsigned, const TargetRegisterClass *>
744  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
745  StringRef Constraint, MVT VT) const override;
746 
747  /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
748  /// function arguments in the caller parameter area. This is the actual
749  /// alignment, not its logarithm.
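 ///
 /// For example, a ByVal aggregate that must be 16-byte aligned makes this
 /// return 16 rather than log2(16) == 4.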
750  unsigned getByValTypeAlignment(Type *Ty,
751  const DataLayout &DL) const override;
752 
753  /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
754  /// vector. If it is invalid, don't add anything to Ops.
755  void LowerAsmOperandForConstraint(SDValue Op,
756  std::string &Constraint,
757  std::vector<SDValue> &Ops,
758  SelectionDAG &DAG) const override;
759 
760  unsigned
761  getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
762  if (ConstraintCode == "es")
763  return InlineAsm::Constraint_es;
764  else if (ConstraintCode == "o")
765  return InlineAsm::Constraint_o;
766  else if (ConstraintCode == "Q")
767  return InlineAsm::Constraint_Q;
768  else if (ConstraintCode == "Z")
769  return InlineAsm::Constraint_Z;
770  else if (ConstraintCode == "Zy")
771  return InlineAsm::Constraint_Zy;
772  return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
773  }
774 
775  /// isLegalAddressingMode - Return true if the addressing mode represented
776  /// by AM is legal for this target, for a load/store of the specified type.
777  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
778  Type *Ty, unsigned AS,
779  Instruction *I = nullptr) const override;
780 
781  /// isLegalICmpImmediate - Return true if the specified immediate is a legal
782  /// icmp immediate, that is, the target has icmp instructions which can
783  /// compare a register against the immediate without having to materialize
784  /// the immediate into a register.
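 ///
 /// For example (illustrative), `cmpwi` takes a 16-bit signed immediate, so
 /// comparing against 32767 needs no extra instruction while comparing
 /// against 70000 requires materializing the constant first.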
785  bool isLegalICmpImmediate(int64_t Imm) const override;
786 
787  /// isLegalAddImmediate - Return true if the specified immediate is a legal
788  /// add immediate, that is, the target has add instructions which can
789  /// add a register and the immediate without having to materialize
790  /// the immediate into a register.
791  bool isLegalAddImmediate(int64_t Imm) const override;
792 
793  /// isTruncateFree - Return true if it's free to truncate a value of
794  /// type Ty1 to type Ty2. e.g., on PPC it's free to truncate an i64 value in
795  /// register X1 to i32 by referencing its sub-register R1.
796  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
797  bool isTruncateFree(EVT VT1, EVT VT2) const override;
798 
799  bool isZExtFree(SDValue Val, EVT VT2) const override;
800 
801  bool isFPExtFree(EVT DestVT, EVT SrcVT) const override;
802 
803  /// Returns true if it is beneficial to convert a load of a constant
804  /// to just the constant itself.
805  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
806  Type *Ty) const override;
807 
808  bool convertSelectOfConstantsToMath(EVT VT) const override {
809  return true;
810  }
811 
812  // Returns true if the address of the global is stored in a TOC entry.
813  bool isAccessedAsGotIndirect(SDValue N) const;
814 
815  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
816 
817  bool getTgtMemIntrinsic(IntrinsicInfo &Info,
818  const CallInst &I,
819  MachineFunction &MF,
820  unsigned Intrinsic) const override;
821 
822  /// getOptimalMemOpType - Returns the target specific optimal type for load
823  /// and store operations as a result of memset, memcpy, and memmove
824  /// lowering. If DstAlign is zero, the destination alignment can satisfy any
825  /// constraint. Similarly, if SrcAlign is zero, there is no need to check it
826  /// against an alignment requirement,
827  /// probably because the source does not need to be loaded. If 'IsMemset' is
828  /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
829  /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
830  /// source is constant so it does not need to be loaded.
831  /// It returns EVT::Other if the type should be determined using generic
832  /// target-independent logic.
833  EVT
834  getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
835  bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
836  MachineFunction &MF) const override;
837 
838  /// Is unaligned memory access allowed for the given type, and is it fast
839  /// relative to software emulation.
840  bool allowsMisalignedMemoryAccesses(EVT VT,
841  unsigned AddrSpace,
842  unsigned Align = 1,
843  bool *Fast = nullptr) const override;
844 
845  /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
846  /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
847  /// expanded to FMAs when this method returns true, otherwise fmuladd is
848  /// expanded to fmul + fadd.
849  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
850 
851  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
852 
853  // Should we expand the build vector with shuffles?
854  bool
855  shouldExpandBuildVectorWithShuffles(EVT VT,
856  unsigned DefinedValues) const override;
857 
858  /// createFastISel - This method returns a target-specific FastISel object,
859  /// or null if the target does not support "fast" instruction selection.
860  FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
861  const TargetLibraryInfo *LibInfo) const override;
862 
863  /// Returns true if an argument of type Ty needs to be passed in a
864  /// contiguous block of registers in calling convention CallConv.
865  bool functionArgumentNeedsConsecutiveRegisters(
866  Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override {
867  // We support any array type as "consecutive" block in the parameter
868  // save area. The element type defines the alignment requirement and
869  // whether the argument should go in GPRs, FPRs, or VRs if available.
870  //
871  // Note that clang uses this capability both to implement the ELFv2
872  // homogeneous float/vector aggregate ABI, and to avoid having to use
873  // "byval" when passing aggregates that might fully fit in registers.
874  return Ty->isArrayTy();
875  }
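 // For example (illustrative), a C argument declared as
 //   struct { double v[3]; }
 // can reach this hook as an array type ([3 x double] in IR); returning true
 // allows it to be passed in three consecutive FPRs when registers are
 // available.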
876 
877  /// If a physical register, this returns the register that receives the
878  /// exception address on entry to an EH pad.
879  unsigned
880  getExceptionPointerRegister(const Constant *PersonalityFn) const override;
881 
882  /// If a physical register, this returns the register that receives the
883  /// exception typeid on entry to a landing pad.
884  unsigned
885  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
886 
887  /// Override to support customized stack guard loading.
888  bool useLoadStackGuardNode() const override;
889  void insertSSPDeclarations(Module &M) const override;
890 
891  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
892 
893  unsigned getJumpTableEncoding() const override;
894  bool isJumpTableRelative() const override;
895  SDValue getPICJumpTableRelocBase(SDValue Table,
896  SelectionDAG &DAG) const override;
897  const MCExpr *getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
898  unsigned JTI,
899  MCContext &Ctx) const override;
900 
901  unsigned getNumRegistersForCallingConv(LLVMContext &Context,
902  CallingConv::ID CC,
903  EVT VT) const override;
904 
905  MVT getRegisterTypeForCallingConv(LLVMContext &Context,
906  CallingConv::ID CC,
907  EVT VT) const override;
908 
909  private:
910  struct ReuseLoadInfo {
911  SDValue Ptr;
912  SDValue Chain;
913  SDValue ResChain;
914  MachinePointerInfo MPI;
915  bool IsDereferenceable = false;
916  bool IsInvariant = false;
917  unsigned Alignment = 0;
918  AAMDNodes AAInfo;
919  const MDNode *Ranges = nullptr;
920 
921  ReuseLoadInfo() = default;
922 
923  MachineMemOperand::Flags MMOFlags() const {
924  MachineMemOperand::Flags F = MachineMemOperand::MONone;
925  if (IsDereferenceable)
926  F |= MachineMemOperand::MODereferenceable;
927  if (IsInvariant)
928  F |= MachineMemOperand::MOInvariant;
929  return F;
930  }
931  };
932 
933  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
934  // Addrspacecasts are always noops.
935  return true;
936  }
937 
938  bool canReuseLoadAddress(SDValue Op, EVT MemVT, ReuseLoadInfo &RLI,
939  SelectionDAG &DAG,
940  ISD::LoadExtType ET = ISD::NON_EXTLOAD) const;
941  void spliceIntoChain(SDValue ResChain, SDValue NewResChain,
942  SelectionDAG &DAG) const;
943 
944  void LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
945  SelectionDAG &DAG, const SDLoc &dl) const;
946  SDValue LowerFP_TO_INTDirectMove(SDValue Op, SelectionDAG &DAG,
947  const SDLoc &dl) const;
948 
949  bool directMoveIsProfitable(const SDValue &Op) const;
950  SDValue LowerINT_TO_FPDirectMove(SDValue Op, SelectionDAG &DAG,
951  const SDLoc &dl) const;
952 
953  SDValue LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
954  const SDLoc &dl) const;
955 
956  SDValue getFramePointerFrameIndex(SelectionDAG & DAG) const;
957  SDValue getReturnAddrFrameIndex(SelectionDAG & DAG) const;
958 
959  bool
960  IsEligibleForTailCallOptimization(SDValue Callee,
961  CallingConv::ID CalleeCC,
962  bool isVarArg,
964  SelectionDAG& DAG) const;
965 
966  bool
967  IsEligibleForTailCallOptimization_64SVR4(
968  SDValue Callee,
969  CallingConv::ID CalleeCC,
971  bool isVarArg,
974  SelectionDAG& DAG) const;
975 
976  SDValue EmitTailCallLoadFPAndRetAddr(SelectionDAG &DAG, int SPDiff,
977  SDValue Chain, SDValue &LROpOut,
978  SDValue &FPOpOut,
979  const SDLoc &dl) const;
980 
981  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
982  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
983  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
984  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
985  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
986  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
987  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
988  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
989  SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
990  SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
991  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
992  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
993  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
994  SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
995  SDValue LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG) const;
996  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
997  SDValue LowerEH_DWARF_CFA(SDValue Op, SelectionDAG &DAG) const;
998  SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
999  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
1000  SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
1001  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
1002  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
1003  const SDLoc &dl) const;
1004  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
1005  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
1006  SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const;
1007  SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const;
1008  SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const;
1009  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
1013  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
1014  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
1015  SDValue LowerREM(SDValue Op, SelectionDAG &DAG) const;
1016  SDValue LowerBSWAP(SDValue Op, SelectionDAG &DAG) const;
1017  SDValue LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
1019  SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
1020  SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
1021  SDValue LowerABS(SDValue Op, SelectionDAG &DAG) const;
1022 
1023  SDValue LowerVectorLoad(SDValue Op, SelectionDAG &DAG) const;
1024  SDValue LowerVectorStore(SDValue Op, SelectionDAG &DAG) const;
1025 
1026  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
1027  CallingConv::ID CallConv, bool isVarArg,
1028  const SmallVectorImpl<ISD::InputArg> &Ins,
1029  const SDLoc &dl, SelectionDAG &DAG,
1030  SmallVectorImpl<SDValue> &InVals) const;
1031  SDValue FinishCall(CallingConv::ID CallConv, const SDLoc &dl,
1032  bool isTailCall, bool isVarArg, bool isPatchPoint,
1033  bool hasNest, SelectionDAG &DAG,
1034  SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
1035  SDValue InFlag, SDValue Chain, SDValue CallSeqStart,
1036  SDValue &Callee, int SPDiff, unsigned NumBytes,
1037  const SmallVectorImpl<ISD::InputArg> &Ins,
1038  SmallVectorImpl<SDValue> &InVals,
1039  ImmutableCallSite CS) const;
1040 
1041  SDValue
1042  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1043  const SmallVectorImpl<ISD::InputArg> &Ins,
1044  const SDLoc &dl, SelectionDAG &DAG,
1045  SmallVectorImpl<SDValue> &InVals) const override;
1046 
1047  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
1048  SmallVectorImpl<SDValue> &InVals) const override;
1049 
1050  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
1051  bool isVarArg,
1052  const SmallVectorImpl<ISD::OutputArg> &Outs,
1053  LLVMContext &Context) const override;
1054 
1055  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1056  const SmallVectorImpl<ISD::OutputArg> &Outs,
1057  const SmallVectorImpl<SDValue> &OutVals,
1058  const SDLoc &dl, SelectionDAG &DAG) const override;
1059 
1060  SDValue extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT,
1061  SelectionDAG &DAG, SDValue ArgVal,
1062  const SDLoc &dl) const;
1063 
1064  SDValue LowerFormalArguments_Darwin(
1065  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1066  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1067  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
1068  SDValue LowerFormalArguments_64SVR4(
1069  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1070  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1071  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
1072  SDValue LowerFormalArguments_32SVR4(
1073  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1074  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1075  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
1076 
1077  SDValue createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
1078  SDValue CallSeqStart,
1079  ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
1080  const SDLoc &dl) const;
1081 
1082  SDValue LowerCall_Darwin(SDValue Chain, SDValue Callee,
1083  CallingConv::ID CallConv, bool isVarArg,
1084  bool isTailCall, bool isPatchPoint,
1085  const SmallVectorImpl<ISD::OutputArg> &Outs,
1086  const SmallVectorImpl<SDValue> &OutVals,
1087  const SmallVectorImpl<ISD::InputArg> &Ins,
1088  const SDLoc &dl, SelectionDAG &DAG,
1089  SmallVectorImpl<SDValue> &InVals,
1090  ImmutableCallSite CS) const;
1091  SDValue LowerCall_64SVR4(SDValue Chain, SDValue Callee,
1092  CallingConv::ID CallConv, bool isVarArg,
1093  bool isTailCall, bool isPatchPoint,
1094  const SmallVectorImpl<ISD::OutputArg> &Outs,
1095  const SmallVectorImpl<SDValue> &OutVals,
1096  const SmallVectorImpl<ISD::InputArg> &Ins,
1097  const SDLoc &dl, SelectionDAG &DAG,
1098  SmallVectorImpl<SDValue> &InVals,
1099  ImmutableCallSite CS) const;
1100  SDValue LowerCall_32SVR4(SDValue Chain, SDValue Callee,
1101  CallingConv::ID CallConv, bool isVarArg,
1102  bool isTailCall, bool isPatchPoint,
1103  const SmallVectorImpl<ISD::OutputArg> &Outs,
1104  const SmallVectorImpl<SDValue> &OutVals,
1105  const SmallVectorImpl<ISD::InputArg> &Ins,
1106  const SDLoc &dl, SelectionDAG &DAG,
1107  SmallVectorImpl<SDValue> &InVals,
1108  ImmutableCallSite CS) const;
1109 
1110  SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
1111  SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
1112  SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
1113 
1114  SDValue DAGCombineExtBoolTrunc(SDNode *N, DAGCombinerInfo &DCI) const;
1115  SDValue DAGCombineBuildVector(SDNode *N, DAGCombinerInfo &DCI) const;
1116  SDValue DAGCombineTruncBoolExt(SDNode *N, DAGCombinerInfo &DCI) const;
1117  SDValue combineStoreFPToInt(SDNode *N, DAGCombinerInfo &DCI) const;
1118  SDValue combineFPToIntToFP(SDNode *N, DAGCombinerInfo &DCI) const;
1119  SDValue combineSHL(SDNode *N, DAGCombinerInfo &DCI) const;
1120  SDValue combineSRA(SDNode *N, DAGCombinerInfo &DCI) const;
1121  SDValue combineSRL(SDNode *N, DAGCombinerInfo &DCI) const;
1122  SDValue combineADD(SDNode *N, DAGCombinerInfo &DCI) const;
1123  SDValue combineTRUNCATE(SDNode *N, DAGCombinerInfo &DCI) const;
1124  SDValue combineSetCC(SDNode *N, DAGCombinerInfo &DCI) const;
1125  SDValue combineABS(SDNode *N, DAGCombinerInfo &DCI) const;
1126  SDValue combineVSelect(SDNode *N, DAGCombinerInfo &DCI) const;
1127 
1128  /// ConvertSETCCToSubtract - looks at SETCC that compares ints. It replaces
1129  /// SETCC with integer subtraction when (1) there is a legal way of doing it and
1130  /// (2) keeping the result of the comparison in a GPR has a performance benefit.
1131  SDValue ConvertSETCCToSubtract(SDNode *N, DAGCombinerInfo &DCI) const;
1132 
1133  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
1134  int &RefinementSteps, bool &UseOneConstNR,
1135  bool Reciprocal) const override;
1136  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
1137  int &RefinementSteps) const override;
1138  unsigned combineRepeatedFPDivisors() const override;
1139 
1140  CCAssignFn *useFastISelCCs(unsigned Flag) const;
1141 
1142  SDValue
1143  combineElementTruncationToVectorTruncation(SDNode *N,
1144  DAGCombinerInfo &DCI) const;
1145 
1146  /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be
1147  /// handled by the VINSERTH instruction introduced in ISA 3.0. This is
1148  /// essentially any shuffle of v8i16 vectors that just inserts one element
1149  /// from one vector into the other.
1150  SDValue lowerToVINSERTH(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;
1151 
1152  /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be
1153  /// handled by the VINSERTB instruction introduced in ISA 3.0. This is
1154  /// essentially v16i8 vector version of VINSERTH.
1155  SDValue lowerToVINSERTB(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;
1156 
1157  // Return whether the call instruction can potentially be optimized to a
1158  // tail call. This will cause the optimizers to attempt to move, or
1159  // duplicate return instructions to help enable tail call optimizations.
1160  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
1161  bool hasBitPreservingFPLogic(EVT VT) const override;
1162  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;
1163  }; // end class PPCTargetLowering
1164 
1165  namespace PPC {
1166 
1167  FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
1168  const TargetLibraryInfo *LibInfo);
1169 
1170  } // end namespace PPC
1171 
1172  bool CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
1173  CCValAssign::LocInfo &LocInfo,
1174  ISD::ArgFlagsTy &ArgFlags,
1175  CCState &State);
1176 
1177  bool CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
1178  MVT &LocVT,
1179  CCValAssign::LocInfo &LocInfo,
1180  ISD::ArgFlagsTy &ArgFlags,
1181  CCState &State);
1182 
1183  bool
1184  CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128(unsigned &ValNo, MVT &ValVT,
1185  MVT &LocVT,
1186  CCValAssign::LocInfo &LocInfo,
1187  ISD::ArgFlagsTy &ArgFlags,
1188  CCState &State);
1189 
1190  bool CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
1191  MVT &LocVT,
1192  CCValAssign::LocInfo &LocInfo,
1193  ISD::ArgFlagsTy &ArgFlags,
1194  CCState &State);
1195 
1196  bool isIntS16Immediate(SDNode *N, int16_t &Imm);
1197  bool isIntS16Immediate(SDValue Op, int16_t &Imm);
1198 
1199 } // end namespace llvm
1200 
1201 #endif // LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG)
G8RC = ADDI_TLSLD_L_ADDR G8RReg, Symbol, Symbol - Op that combines ADDI_TLSLD_L and GET_TLSLD_ADDR un...
x3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS model, produces an ADDI8 instruction tha...
static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG)
static SDValue LowerCallResult(SDValue Chain, SDValue InFlag, const SmallVectorImpl< CCValAssign > &RVLocs, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals)
LowerCallResult - Lower the result values of a call into the appropriate copies out of appropriate ph...
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:877
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:111
bool isSelectSupported(SelectSupportKind Kind) const override
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
FastISel * createFastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo)
Return with a flag operand, matched by &#39;blr&#39;.
Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions for unsigned integers with round ...
SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG)
get_VSPLTI_elt - If this is a build_vector of constants which can be formed by using a vspltis[bhw] i...
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
getPreferredVectorAction - The code we generate when vector types are legalized by promoting the inte...
GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a byte-swapping load instruction.
static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
LLVMContext & Context
QVFPERM = This corresponds to the QPX qvfperm instruction.
This class represents lattice values for constants.
Definition: AllocatorList.h:24
GPRC = address of GLOBAL_OFFSET_TABLE.
G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS model, produces an ADDI8 instruction ...
A Module instance is used to store all the information related to an LLVM module. ...
Definition: Module.h:65
bool CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
bool CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
The following two target-specific nodes are used for calls through function pointers in the 64-bit SV...
VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded during instruction selection to optimi...
bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, unsigned &InsertAtByte, bool &Swap, bool IsLE)
isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled by the XXINSERTW instruction intr...
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition: ValueTypes.h:146
This class represents a function call, abstracting a target machine&#39;s calling convention.
This file contains the declarations for metadata subclasses.
QBRC, CHAIN = QVLFSb CHAIN, Ptr The 4xf32 load used for v4i1 constants.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change...
bool isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, bool &Swap, bool IsLE)
isXXSLDWIShuffleMask - Return true if this is a shuffle mask suitable for a XXSLDWI instruction...
Function Alias Analysis Results
CHAIN = RFEBB CHAIN, State - Return from event-based branch.
VEXTS, ByteWidth - takes an input in VSFRC and produces an output in VSFRC that is sign-extended from...
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.h:321
bool hasAndNotCompare(SDValue) const override
Return true if the target should transform: (X & Y) == Y —> (~X & Y) == 0 (X & Y) != Y —> (~X & Y) ...
unsigned const TargetRegisterInfo * TRI
Metadata node.
Definition: Metadata.h:864
F(f)
bool convertSelectOfConstantsToMath(EVT VT) const override
Return true if a select of constants (select Cond, C1, C2) should be transformed into simple math ops...
CALL - A direct function call.
CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a BCTRL instruction.
bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a VPKUDUM instruction.
Floating-point-to-interger conversion instructions.
Newer FCFID[US] integer-to-floating-point conversion instructions for unsigned integers and single-pr...
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a VPKUHUM instruction.
GlobalBaseReg - On Darwin, this node represents the result of the mflr at function entry...
bool isCheapToSpeculateCtlz() const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
G8RC = ADDI_TLSGD_L_ADDR G8RReg, Symbol, Symbol - Op that combines ADDI_TLSGD_L and GET_TLS_ADDR unti...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:36
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:743
MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override
EVT is not used in-tree, but is used by out-of-tree target.
This file contains the simple types necessary to represent the attributes associated with functions a...
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, const SparcSubtarget *Subtarget)
The memory access is dereferenceable (i.e., doesn&#39;t trap).
static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG)
Direct move from a GPR to a VSX register (algebraic)
x3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS model, produces an ADDI8 instruction t...
bool shouldInsertFencesForAtomic(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic...
ATOMIC_CMP_SWAP - the exact same as the target-independent nodes except they ensure that the compare ...
QVALIGNI = This corresponds to the QPX qvaligni instruction.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
AtomicOrdering
Atomic ordering for LLVM&#39;s memory model.
Context object for machine code objects.
Definition: MCContext.h:63
static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG)
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override
Returns true if an argument of type Ty needs to be passed in a contiguous block of registers in calli...
Fast - This calling convention attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:43
bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize)
isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand specifies a splat of a singl...
This is a fast-path instruction selection class that generates poor code and doesn&#39;t support illegal ...
Definition: FastISel.h:67
An SDNode for Power9 vector absolute value difference.
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG)
CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based loops.
R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction.
G8RC = ADDIS_TLSGD_HA x2, Symbol - For the general-dynamic TLS model, produces an ADDIS8 instruction ...
This contains information for each constraint that we are lowering.
CHAIN = STXVD2X CHAIN, VSRC, Ptr - Occurs only for little endian.
virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const
bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for a VRGL* instruction with the ...
bool CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
amdgpu Simplify well known AMD library false Value * Callee
Analysis containing CSE Info
Definition: CSEInfo.cpp:21
This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to compute an offset from native ...
bool isXXBRWShuffleMask(ShuffleVectorSDNode *N)
isXXBRWShuffleMask - Return true if this is a shuffle mask suitable for a XXBRW instruction.
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:118
VSRC, CHAIN = LXVD2X_LE CHAIN, Ptr - Occurs only for little endian.
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
FSEL - Traditional three-operand fsel node.
bool isXXBRQShuffleMask(ShuffleVectorSDNode *N)
isXXBRQShuffleMask - Return true if this is a shuffle mask suitable for a XXBRQ instruction.
Machine Value Type.
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG)
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:46
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:69
ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls
static Value * LowerBSWAP(LLVMContext &Context, Value *V, Instruction *IP)
Emit the code to lower bswap of V before the specified instruction IP.
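For reference, the expansion such a lowering produces is just shifts and masks; a minimal standalone sketch for a 32-bit value (not the IR-building routine itself):

#include <cstdint>
#include <cstdio>

// Shift-and-mask form of a 32-bit byte swap, the pattern a bswap lowering
// expands to when no dedicated instruction is used.
static uint32_t bswap32(uint32_t V) {
  return ((V & 0x000000FFu) << 24) |
         ((V & 0x0000FF00u) << 8)  |
         ((V & 0x00FF0000u) >> 8)  |
         ((V & 0xFF000000u) >> 24);
}

int main() {
  std::printf("%08x\n", bswap32(0x12345678u)); // prints 78563412
}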
unsigned getScalarSizeInBits() const
This is an important base class in LLVM.
Definition: Constant.h:42
G8RC = ADDIS_DTPREL_HA x3, Symbol - For the local-dynamic TLS model, produces an ADDIS8 instruction t...
SExtVElems, takes an input vector of a smaller type and sign extends to an output vector of a larger ...
VECINSERT - The PPC vector insert instruction.
Direct move from a VSX register to a GPR.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Definition: ISDOpcodes.h:934
CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a MTCTR instruction.
unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize, SelectionDAG &DAG)
getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the specified isSplatShuffleMask...
STFIWX - The STFIWX instruction.
FCFID - The FCFID instruction, taking an f64 operand and producing an f64 value containing the FP re...
Store scalar integers from VSR.
bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a VPKUWUM instruction.
static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
CHAIN = CLRBHRB CHAIN - Clear branch history rolling buffer.
GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point load which sign-extends from a 32-bit inte...
G8RC = ADDIS_TLSLD_HA x2, Symbol - For the local-dynamic TLS model, produces an ADDIS8 instruction th...
static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget)
G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec TLS model, produces a LD instruction ...
QVESPLATI = This corresponds to the QPX qvesplati instruction.
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
lazy value info
Common code between 32-bit and 64-bit PowerPC targets.
int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift amount, otherwise return -1...
bool supportSplitCSR(MachineFunction *MF) const override
Return true if the target supports that a subset of CSRs for the given machine function is handled ex...
Extended Value Type.
Definition: ValueTypes.h:34
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
This structure contains all information that is necessary for lowering calls.
This class contains a discriminated union of information about pointers in memory operands...
bool isXXBRDShuffleMask(ShuffleVectorSDNode *N)
isXXBRDShuffleMask - Return true if this is a shuffle mask suitable for a XXBRD instruction.
CHAIN,FLAG = BCTRL(CHAIN, ADDR, INFLAG) - The combination of a bctrl instruction and the TOC reload r...
GPRC, CHAIN = LXSIZX, CHAIN, Ptr, ByteWidth - This is a load of an integer smaller than 64 bits into ...
Extract a subvector from an unsigned integer vector and convert to FP.
QBFLT = Access the underlying QPX floating-point boolean representation.
EXTSWSLI = The PPC extswsli instruction, which does an extend-sign word and shift left immediate...
static SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG)
CCState - This class holds information needed while lowering arguments and return values...
x3 = GET_TLSLD_ADDR x3, Symbol - For the local-dynamic TLS model, produces a call to __tls_get_addr(s...
GPRC = TOC_ENTRY GA, TOC - Loads the entry for GA from the TOC, where the TOC base is given by the last...
XXSPLT - The PPC VSX splat instructions.
bool CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
VECSHL - The PPC vector shift left instruction.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:213
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:222
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:847
G8RC = ADD_TLS G8RReg, Symbol - Used by the initial-exec TLS model, produces an ADD instruction that ...
bool convertSetCCLogicToBitwiseLogic(EVT VT) const override
Use bitwise logic to make pairs of compares more efficient.
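Roughly, the rewrite this hook opts into fuses two equality compares into one; a scalar sketch of the equivalent transformation (illustrative only):

#include <cstdint>
#include <cassert>

// (A == B) && (C == D) rewritten as a single compare over OR-ed XOR
// differences, leaving one setcc/branch instead of two.
static bool bothEqualFused(uint64_t A, uint64_t B, uint64_t C, uint64_t D) {
  return ((A ^ B) | (C ^ D)) == 0;
}

int main() {
  assert(bothEqualFused(1, 1, 7, 7));
  assert(!bothEqualFused(1, 2, 7, 7));
}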
Provides information about what library functions are available for the current target.
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:644
CHAIN = SC CHAIN, Imm128 - System call.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
x3 = GET_TLS_ADDR x3, Symbol - For the general-dynamic TLS model, produces a call to __tls_get_addr(s...
bool isCheapToSpeculateCttz() const override
Return true if it is cheap to speculate a call to intrinsic cttz.
static const int FIRST_TARGET_MEMORY_OPCODE
FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations which do not reference a specific me...
Definition: ISDOpcodes.h:884
Represents one node in the SelectionDAG.
VPERM - The PPC VPERM Instruction.
bool isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, bool &Swap, bool IsLE)
isXXPERMDIShuffleMask - Return true if this is a shuffle mask suitable for a XXPERMDI instruction...
static bool Enabled
Definition: Statistic.cpp:51
const Function & getFunction() const
Return the LLVM function that this machine code represents.
STXSIX - The STXSI[bh]X instruction.
i1 = ANDIo_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the eq or gt bit of CR0 after execu...
G8RC = ADDIS_GOT_TPREL_HA x2, Symbol - Used by the initial-exec TLS model, produces an ADDIS8 instruc...
Class for arbitrary precision integers.
Definition: APInt.h:70
QVGPCI = This corresponds to the QPX qvgpci instruction.
static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG)
static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
The combination of sra[wd]i and addze used to implement signed integer division by a power of 2...
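The trick: an arithmetic shift alone rounds toward negative infinity, so the carry produced by sra[wd]i (set when the dividend is negative and nonzero bits are shifted out) is added back by addze to get truncating division. A standalone sketch of the equivalent computation, assuming a two's-complement arithmetic right shift:

#include <cstdint>
#include <cstdio>

// Signed division by 2^K the way srawi + addze computes it.
static int32_t sdivPow2(int32_t X, unsigned K) {
  int32_t Q = X >> K;                                   // srawi (arithmetic shift)
  bool Carry = X < 0 && (uint32_t(X) & ((1u << K) - 1)) != 0;
  return Q + (Carry ? 1 : 0);                           // addze (add the carry)
}

int main() {
  std::printf("%d %d\n", sdivPow2(-7, 1), -7 / 2);      // both print -3
}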
Flags
Flags values. These may be or'd together.
amdgpu Simplify well known AMD library false Value Value * Arg
GPRC = address of GLOBAL_OFFSET_TABLE.
Representation of each machine instruction.
Definition: MachineInstr.h:64
GPRC, CHAIN = MFBHRBE CHAIN, Entry, Dummy - Move from branch history rolling buffer entry...
unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override
SelectSupportKind
Enum that describes what type of support for selects the target has.
Reciprocal estimate instructions (unary FP ops).
bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for a VMRGEW or VMRGOW instructi...
F8RC = MFFS - This moves the FPSCR (not modeled) into the register.
Establish a view to a call site for examination.
Definition: CallSite.h:711
#define I(x, y, z)
Definition: MD5.cpp:58
#define N
Direct move from a GPR to a VSX register (zero)
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
The CMPB instruction (takes two operands of i32 or i64).
The memory access always returns the same value (or traps).
CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a byte-swapping store instruction. ...
uint32_t Size
Definition: Profile.cpp:47
TC_RETURN - A tail call return.
const unsigned Kind
VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little endian.
bool isCtlzFast() const override
Return true if ctlz instruction is fast.
XXREVERSE - The PPC VSX reverse instruction.
LegalizeTypeAction
This enum indicates whether types are legal for a target, and if not, what action should be used to...
Direct move of 2 consecutive GPRs to a VSX register.
CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This corresponds to the COND_BRANCH pseudo ...
static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
These nodes represent PPC shifts.
IRTranslator LLVM IR MI
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:49
Extract a subvector from a signed integer vector and convert to FP.
RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP* instructions.
FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64 operand, producing an f64 value...
Hi/Lo - These represent the high and low 16-bit parts of a global address respectively.
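Because the low half is later consumed as a signed 16-bit displacement, the usual materialization adjusts the high half so that (ha << 16) plus the sign-extended low half reproduces the address. A standalone sketch of that split for a 32-bit address (illustrative; the exact adjustment handling in the backend may differ):

#include <cstdint>
#include <cassert>
#include <cstdio>

// High-adjusted / low split: adding 0x8000 before taking the high half
// compensates for the low half being sign-extended when it is added back.
static void splitHiLo(uint32_t Addr, uint16_t &Ha, uint16_t &Lo) {
  Ha = uint16_t((Addr + 0x8000u) >> 16);
  Lo = uint16_t(Addr & 0xFFFFu);
}

int main() {
  uint16_t Ha, Lo;
  uint32_t Addr = 0x1000F000u;                   // low half has its sign bit set
  splitHiLo(Addr, Ha, Lo);
  uint32_t Rebuilt = (uint32_t(Ha) << 16) + uint32_t(int32_t(int16_t(Lo)));
  assert(Rebuilt == Addr);
  std::printf("%04x %04x\n", Ha, Lo);            // prints 1001 f000
}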
bool isXXBRHShuffleMask(ShuffleVectorSDNode *N)
isXXBRHShuffleMask - Return true if this is a shuffle mask suitable for a XXBRH instruction.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding towards zero.
An SDNode for swaps that are not associated with any loads/stores and thereby have no chain...
RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the altivec VCMP*o instructions.
GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point load which zero-extends from a 32-bit inte...
static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
bool isIntS16Immediate(SDNode *N, int16_t &Imm)
isIntS16Immediate - This method tests to see if the node is either a 32-bit or 64-bit immediate...
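The underlying test is simply whether sign-extending the low 16 bits reproduces the constant; a standalone sketch (hypothetical helper, not the API above):

#include <cstdint>
#include <cstdio>

// A constant fits a signed 16-bit immediate field iff truncating to i16 and
// sign-extending back is lossless.
static bool fitsSImm16(int64_t C, int16_t &Imm) {
  Imm = int16_t(C);          // keep the low 16 bits
  return int64_t(Imm) == C;  // lossless iff the value was already in [-32768, 32767]
}

int main() {
  int16_t Imm;
  std::printf("%d %d\n", fitsSImm16(-32768, Imm), fitsSImm16(40000, Imm)); // prints 1 0
}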
bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for a VRGH* instruction with the ...
XXPERMDI - The PPC XXPERMDI instruction.
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition: Type.h:221
This file describes how to lower LLVM code to machine code.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
Definition: ISDOpcodes.h:914
int isQVALIGNIShuffleMask(SDNode *N)
If this is a qvaligni shuffle mask, return the shift amount, otherwise return -1. ...