1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines the interfaces that RISCV uses to lower LLVM code into a
11 // selection DAG.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "RISCVISelLowering.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/Statistic.h"
30 #include "llvm/IR/DiagnosticInfo.h"
32 #include "llvm/Support/Debug.h"
35 
36 using namespace llvm;
37 
38 #define DEBUG_TYPE "riscv-lower"
39 
40 STATISTIC(NumTailCalls, "Number of tail calls");
41 
42 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
43  const RISCVSubtarget &STI)
44  : TargetLowering(TM), Subtarget(STI) {
45 
46  MVT XLenVT = Subtarget.getXLenVT();
47 
48  // Set up the register classes.
49  addRegisterClass(XLenVT, &RISCV::GPRRegClass);
50 
51  if (Subtarget.hasStdExtF())
52  addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
53  if (Subtarget.hasStdExtD())
54  addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
55 
56  // Compute derived properties from the register classes.
57  computeRegisterProperties(STI.getRegisterInfo());
58 
59  setStackPointerRegisterToSaveRestore(RISCV::X2);
60 
61  for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
62  setLoadExtAction(N, XLenVT, MVT::i1, Promote);
63 
64  // TODO: add all necessary setOperationAction calls.
66 
71 
74 
79 
80  for (auto VT : {MVT::i1, MVT::i8, MVT::i16})
81  setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
82 
83  if (Subtarget.is64Bit()) {
88  }
89 
90  if (!Subtarget.hasStdExtM()) {
98  }
99 
104 
108 
115 
116  ISD::CondCode FPCCToExtend[] = {
120 
121  ISD::NodeType FPOpToExtend[] = {
123 
124  if (Subtarget.hasStdExtF()) {
127  for (auto CC : FPCCToExtend)
132  for (auto Op : FPOpToExtend)
134  }
135 
136  if (Subtarget.hasStdExtD()) {
139  for (auto CC : FPCCToExtend)
146  for (auto Op : FPOpToExtend)
148  }
149 
153 
154  if (Subtarget.hasStdExtA()) {
157  } else {
159  }
160 
162 
163  // Function alignments (log2).
164  unsigned FunctionAlignment = Subtarget.hasStdExtC() ? 1 : 2;
165  setMinFunctionAlignment(FunctionAlignment);
166  setPrefFunctionAlignment(FunctionAlignment);
167 
168  // Effectively disable jump table generation.
169  setMinimumJumpTableEntries(INT_MAX);
170 }
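// Note: setMinFunctionAlignment/setPrefFunctionAlignment take a log2 value,
// so the choice above means 2-byte function alignment (2^1) when the
// compressed C extension provides 16-bit instruction parcels, and 4-byte
// alignment (2^2) otherwise.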
171 
172 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
173  EVT VT) const {
174  if (!VT.isVector())
175  return getPointerTy(DL);
176  return VT.changeVectorElementTypeToInteger();
177 }
178 
179 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
180  const CallInst &I,
181  MachineFunction &MF,
182  unsigned Intrinsic) const {
183  switch (Intrinsic) {
184  default:
185  return false;
195  PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
197  Info.memVT = MVT::getVT(PtrTy->getElementType());
198  Info.ptrVal = I.getArgOperand(0);
199  Info.offset = 0;
200  Info.align = 4;
203  return true;
204  }
205 }
206 
207 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
208  const AddrMode &AM, Type *Ty,
209  unsigned AS,
210  Instruction *I) const {
211  // No global is ever allowed as a base.
212  if (AM.BaseGV)
213  return false;
214 
215  // Require a 12-bit signed offset.
216  if (!isInt<12>(AM.BaseOffs))
217  return false;
218 
219  switch (AM.Scale) {
220  case 0: // "r+i" or just "i", depending on HasBaseReg.
221  break;
222  case 1:
223  if (!AM.HasBaseReg) // allow "r+i".
224  break;
225  return false; // disallow "r+r" or "r+r+i".
226  default:
227  return false;
228  }
229 
230  return true;
231 }
232 
233 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
234  return isInt<12>(Imm);
235 }
236 
237 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
238  return isInt<12>(Imm);
239 }
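// For example, RISC-V I-type immediates are 12-bit sign-extended values, so
// an add or compare against a constant in [-2048, 2047] can use the immediate
// form directly, while a value such as 2048 must first be materialised into a
// register (e.g. via LUI/ADDI).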
240 
241 // On RV32, 64-bit integers are split into their high and low parts and held
242 // in two different registers, so the trunc is free since the low register can
243 // just be used.
244 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
245  if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
246  return false;
247  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
248  unsigned DestBits = DstTy->getPrimitiveSizeInBits();
249  return (SrcBits == 64 && DestBits == 32);
250 }
251 
252 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
253  if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
254  !SrcVT.isInteger() || !DstVT.isInteger())
255  return false;
256  unsigned SrcBits = SrcVT.getSizeInBits();
257  unsigned DestBits = DstVT.getSizeInBits();
258  return (SrcBits == 64 && DestBits == 32);
259 }
260 
261 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
262  // Zexts are free if they can be combined with a load.
263  if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
264  EVT MemVT = LD->getMemoryVT();
265  if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
266  (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
267  (LD->getExtensionType() == ISD::NON_EXTLOAD ||
268  LD->getExtensionType() == ISD::ZEXTLOAD))
269  return true;
270  }
271 
272  return TargetLowering::isZExtFree(Val, VT2);
273 }
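// For example, (zext (load i8)) selects to a single LBU and (zext (load i16))
// to LHU, so no extra mask or shift is required; on RV64 an i32 load can
// likewise be selected as LWU.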
274 
275 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
276  return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
277 }
278 
279 // Changes the condition code and swaps operands if necessary, so the SetCC
280 // operation matches one of the comparisons supported directly in the RISC-V
281 // ISA.
282 static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
283  switch (CC) {
284  default:
285  break;
286  case ISD::SETGT:
287  case ISD::SETLE:
288  case ISD::SETUGT:
289  case ISD::SETULE:
290  CC = ISD::getSetCCSwappedOperands(CC);
291  std::swap(LHS, RHS);
292  break;
293  }
294 }
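// For example, (setcc a, b, setgt) is rewritten as (setcc b, a, setlt), which
// maps directly onto BLT after lowering; the unsigned and less-or-equal forms
// are normalised in the same way.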
295 
296 // Return the RISC-V branch opcode that matches the given DAG integer
297 // condition code. The CondCode must be one of those supported by the RISC-V
298 // ISA (see normaliseSetCC).
299 static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
300  switch (CC) {
301  default:
302  llvm_unreachable("Unsupported CondCode");
303  case ISD::SETEQ:
304  return RISCV::BEQ;
305  case ISD::SETNE:
306  return RISCV::BNE;
307  case ISD::SETLT:
308  return RISCV::BLT;
309  case ISD::SETGE:
310  return RISCV::BGE;
311  case ISD::SETULT:
312  return RISCV::BLTU;
313  case ISD::SETUGE:
314  return RISCV::BGEU;
315  }
316 }
317 
318 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
319  SelectionDAG &DAG) const {
320  switch (Op.getOpcode()) {
321  default:
322  report_fatal_error("unimplemented operand");
323  case ISD::GlobalAddress:
324  return lowerGlobalAddress(Op, DAG);
325  case ISD::BlockAddress:
326  return lowerBlockAddress(Op, DAG);
327  case ISD::ConstantPool:
328  return lowerConstantPool(Op, DAG);
329  case ISD::SELECT:
330  return lowerSELECT(Op, DAG);
331  case ISD::VASTART:
332  return lowerVASTART(Op, DAG);
333  case ISD::FRAMEADDR:
334  return lowerFRAMEADDR(Op, DAG);
335  case ISD::RETURNADDR:
336  return lowerRETURNADDR(Op, DAG);
337  }
338 }
339 
340 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
341  SelectionDAG &DAG) const {
342  SDLoc DL(Op);
343  EVT Ty = Op.getValueType();
344  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
345  const GlobalValue *GV = N->getGlobal();
346  int64_t Offset = N->getOffset();
347  MVT XLenVT = Subtarget.getXLenVT();
348 
349  if (isPositionIndependent())
350  report_fatal_error("Unable to lowerGlobalAddress");
351  // In order to maximise the opportunity for common subexpression elimination,
352  // emit a separate ADD node for the global address offset instead of folding
353  // it in the global address node. Later peephole optimisations may choose to
354  // fold it back in when profitable.
355  SDValue GAHi = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_HI);
356  SDValue GALo = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_LO);
357  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, GAHi), 0);
358  SDValue MNLo =
359  SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, GALo), 0);
360  if (Offset != 0)
361  return DAG.getNode(ISD::ADD, DL, Ty, MNLo,
362  DAG.getConstant(Offset, DL, XLenVT));
363  return MNLo;
364 }
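// As an illustration of the non-PIC lowering above, a reference to a global
// @g becomes roughly:
//   lui  a0, %hi(g)
//   addi a0, a0, %lo(g)
// with any non-zero offset emitted as a separate ADD that later peepholes may
// fold back in.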
365 
366 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
367  SelectionDAG &DAG) const {
368  SDLoc DL(Op);
369  EVT Ty = Op.getValueType();
370  BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
371  const BlockAddress *BA = N->getBlockAddress();
372  int64_t Offset = N->getOffset();
373 
374  if (isPositionIndependent())
375  report_fatal_error("Unable to lowerBlockAddress");
376 
377  SDValue BAHi = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_HI);
378  SDValue BALo = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_LO);
379  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, BAHi), 0);
380  SDValue MNLo =
381  SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, BALo), 0);
382  return MNLo;
383 }
384 
385 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
386  SelectionDAG &DAG) const {
387  SDLoc DL(Op);
388  EVT Ty = Op.getValueType();
389  ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
390  const Constant *CPA = N->getConstVal();
391  int64_t Offset = N->getOffset();
392  unsigned Alignment = N->getAlignment();
393 
394  if (!isPositionIndependent()) {
395  SDValue CPAHi =
396  DAG.getTargetConstantPool(CPA, Ty, Alignment, Offset, RISCVII::MO_HI);
397  SDValue CPALo =
398  DAG.getTargetConstantPool(CPA, Ty, Alignment, Offset, RISCVII::MO_LO);
399  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, CPAHi), 0);
400  SDValue MNLo =
401  SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, CPALo), 0);
402  return MNLo;
403  } else {
404  report_fatal_error("Unable to lowerConstantPool");
405  }
406 }
407 
408 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
409  SDValue CondV = Op.getOperand(0);
410  SDValue TrueV = Op.getOperand(1);
411  SDValue FalseV = Op.getOperand(2);
412  SDLoc DL(Op);
413  MVT XLenVT = Subtarget.getXLenVT();
414 
415  // If the result type is XLenVT and CondV is the output of a SETCC node
416  // which also operated on XLenVT inputs, then merge the SETCC node into the
417  // lowered RISCVISD::SELECT_CC to take advantage of the integer
418  // compare+branch instructions. i.e.:
419  // (select (setcc lhs, rhs, cc), truev, falsev)
420  // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
421  if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
422  CondV.getOperand(0).getSimpleValueType() == XLenVT) {
423  SDValue LHS = CondV.getOperand(0);
424  SDValue RHS = CondV.getOperand(1);
425  auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
426  ISD::CondCode CCVal = CC->get();
427 
428  normaliseSetCC(LHS, RHS, CCVal);
429 
430  SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT);
431  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
432  SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
433  return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
434  }
435 
436  // Otherwise:
437  // (select condv, truev, falsev)
438  // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
439  SDValue Zero = DAG.getConstant(0, DL, XLenVT);
440  SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT);
441 
442  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
443  SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
444 
445  return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
446 }
447 
448 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
449  MachineFunction &MF = DAG.getMachineFunction();
450  RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
451 
452  SDLoc DL(Op);
453  SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
454  getPointerTy(MF.getDataLayout()));
455 
456  // vastart just stores the address of the VarArgsFrameIndex slot into the
457  // memory location argument.
458  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
459  return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
460  MachinePointerInfo(SV));
461 }
462 
463 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
464  SelectionDAG &DAG) const {
465  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
466  MachineFunction &MF = DAG.getMachineFunction();
467  MachineFrameInfo &MFI = MF.getFrameInfo();
468  MFI.setFrameAddressIsTaken(true);
469  unsigned FrameReg = RI.getFrameRegister(MF);
470  int XLenInBytes = Subtarget.getXLen() / 8;
471 
472  EVT VT = Op.getValueType();
473  SDLoc DL(Op);
474  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
475  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
476  while (Depth--) {
477  int Offset = -(XLenInBytes * 2);
478  SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
479  DAG.getIntPtrConstant(Offset, DL));
480  FrameAddr =
481  DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
482  }
483  return FrameAddr;
484 }
485 
486 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
487  SelectionDAG &DAG) const {
488  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
489  MachineFunction &MF = DAG.getMachineFunction();
490  MachineFrameInfo &MFI = MF.getFrameInfo();
491  MFI.setReturnAddressIsTaken(true);
492  MVT XLenVT = Subtarget.getXLenVT();
493  int XLenInBytes = Subtarget.getXLen() / 8;
494 
495  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
496  return SDValue();
497 
498  EVT VT = Op.getValueType();
499  SDLoc DL(Op);
500  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
501  if (Depth) {
502  int Off = -XLenInBytes;
503  SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
504  SDValue Offset = DAG.getConstant(Off, DL, VT);
505  return DAG.getLoad(VT, DL, DAG.getEntryNode(),
506  DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
507  MachinePointerInfo());
508  }
509 
510  // Return the value of the return address register, marking it an implicit
511  // live-in.
512  unsigned Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
513  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
514 }
515 
516 // Return true if the given node is a shift with a non-constant shift amount.
517 static bool isVariableShift(SDValue Val) {
518  switch (Val.getOpcode()) {
519  default:
520  return false;
521  case ISD::SHL:
522  case ISD::SRA:
523  case ISD::SRL:
524  return Val.getOperand(1).getOpcode() != ISD::Constant;
525  }
526 }
527 
528 // Returns true if the given node is an sdiv, udiv, or urem with non-constant
529 // operands.
530 static bool isVariableSDivUDivURem(SDValue Val) {
531  switch (Val.getOpcode()) {
532  default:
533  return false;
534  case ISD::SDIV:
535  case ISD::UDIV:
536  case ISD::UREM:
537  return Val.getOperand(0).getOpcode() != ISD::Constant &&
538  Val.getOperand(1).getOpcode() != ISD::Constant;
539  }
540 }
541 
542 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
543  DAGCombinerInfo &DCI) const {
544  SelectionDAG &DAG = DCI.DAG;
545 
546  switch (N->getOpcode()) {
547  default:
548  break;
549  case ISD::SHL:
550  case ISD::SRL:
551  case ISD::SRA: {
552  assert(Subtarget.getXLen() == 64 && "Combine should be 64-bit only");
553  if (!DCI.isBeforeLegalize())
554  break;
555  SDValue RHS = N->getOperand(1);
556  if (N->getValueType(0) != MVT::i32 || RHS->getOpcode() == ISD::Constant ||
557  (RHS->getOpcode() == ISD::AssertZext &&
558  cast<VTSDNode>(RHS->getOperand(1))->getVT().getSizeInBits() <= 5))
559  break;
560  SDValue LHS = N->getOperand(0);
561  SDLoc DL(N);
562  SDValue NewRHS =
563  DAG.getNode(ISD::AssertZext, DL, RHS.getValueType(), RHS,
564  DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), 5)));
565  return DCI.CombineTo(
566  N, DAG.getNode(N->getOpcode(), DL, LHS.getValueType(), LHS, NewRHS));
567  }
568  case ISD::ANY_EXTEND: {
569  // If any-extending an i32 variable-length shift or sdiv/udiv/urem to i64,
570  // then instead sign-extend in order to increase the chance of being able
571  // to select the sllw/srlw/sraw/divw/divuw/remuw instructions.
572  SDValue Src = N->getOperand(0);
573  if (N->getValueType(0) != MVT::i64 || Src.getValueType() != MVT::i32)
574  break;
575  if (!isVariableShift(Src) &&
576  !(Subtarget.hasStdExtM() && isVariableSDivUDivURem(Src)))
577  break;
578  SDLoc DL(N);
579  return DCI.CombineTo(N, DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Src));
580  }
581  case RISCVISD::SplitF64: {
582  // If the input to SplitF64 is just BuildPairF64 then the operation is
583  // redundant. Instead, use BuildPairF64's operands directly.
584  SDValue Op0 = N->getOperand(0);
585  if (Op0->getOpcode() != RISCVISD::BuildPairF64)
586  break;
587  return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
588  }
589  }
590 
591  return SDValue();
592 }
593 
594 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
595  MachineBasicBlock *BB) {
596  assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
597 
598  MachineFunction &MF = *BB->getParent();
599  DebugLoc DL = MI.getDebugLoc();
600  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
601  const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
602  unsigned LoReg = MI.getOperand(0).getReg();
603  unsigned HiReg = MI.getOperand(1).getReg();
604  unsigned SrcReg = MI.getOperand(2).getReg();
605  const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
606  int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex();
607 
608  TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
609  RI);
610  MachineMemOperand *MMO =
611  MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
612  MachineMemOperand::MOLoad, 8, 8);
613  BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
614  .addFrameIndex(FI)
615  .addImm(0)
616  .addMemOperand(MMO);
617  BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
618  .addFrameIndex(FI)
619  .addImm(4)
620  .addMemOperand(MMO);
621  MI.eraseFromParent(); // The pseudo instruction is gone now.
622  return BB;
623 }
624 
625 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
626  MachineBasicBlock *BB) {
627  assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
628  "Unexpected instruction");
629 
630  MachineFunction &MF = *BB->getParent();
631  DebugLoc DL = MI.getDebugLoc();
632  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
633  const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
634  unsigned DstReg = MI.getOperand(0).getReg();
635  unsigned LoReg = MI.getOperand(1).getReg();
636  unsigned HiReg = MI.getOperand(2).getReg();
637  const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
638  int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex();
639 
640  MachineMemOperand *MMO =
641  MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
642  MachineMemOperand::MOStore, 8, 8);
643  BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
644  .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
645  .addFrameIndex(FI)
646  .addImm(0)
647  .addMemOperand(MMO);
648  BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
649  .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
650  .addFrameIndex(FI)
651  .addImm(4)
652  .addMemOperand(MMO);
653  TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
654  MI.eraseFromParent(); // The pseudo instruction is gone now.
655  return BB;
656 }
657 
658 MachineBasicBlock *
659 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
660  MachineBasicBlock *BB) const {
661  switch (MI.getOpcode()) {
662  default:
663  llvm_unreachable("Unexpected instr type to insert");
664  case RISCV::Select_GPR_Using_CC_GPR:
665  case RISCV::Select_FPR32_Using_CC_GPR:
666  case RISCV::Select_FPR64_Using_CC_GPR:
667  break;
668  case RISCV::BuildPairF64Pseudo:
669  return emitBuildPairF64Pseudo(MI, BB);
670  case RISCV::SplitF64Pseudo:
671  return emitSplitF64Pseudo(MI, BB);
672  }
673 
674  // To "insert" a SELECT instruction, we actually have to insert the triangle
675  // control-flow pattern. The incoming instruction knows the destination vreg
676  // to set, the condition code register to branch on, the true/false values to
677  // select between, and the condcode to use to select the appropriate branch.
678  //
679  // We produce the following control flow:
680  // HeadMBB
681  // | \
682  // | IfFalseMBB
683  // | /
684  // TailMBB
685  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
686  const BasicBlock *LLVM_BB = BB->getBasicBlock();
687  DebugLoc DL = MI.getDebugLoc();
688  MachineFunction::iterator I = ++BB->getIterator();
689 
690  MachineBasicBlock *HeadMBB = BB;
691  MachineFunction *F = BB->getParent();
692  MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
693  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
694 
695  F->insert(I, IfFalseMBB);
696  F->insert(I, TailMBB);
697  // Move all remaining instructions to TailMBB.
698  TailMBB->splice(TailMBB->begin(), HeadMBB,
699  std::next(MachineBasicBlock::iterator(MI)), HeadMBB->end());
700  // Update machine-CFG edges by transferring all successors of the current
701  // block to the new block which will contain the Phi node for the select.
702  TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
703  // Set the successors for HeadMBB.
704  HeadMBB->addSuccessor(IfFalseMBB);
705  HeadMBB->addSuccessor(TailMBB);
706 
707  // Insert appropriate branch.
708  unsigned LHS = MI.getOperand(1).getReg();
709  unsigned RHS = MI.getOperand(2).getReg();
710  auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
711  unsigned Opcode = getBranchOpcodeForIntCondCode(CC);
712 
713  BuildMI(HeadMBB, DL, TII.get(Opcode))
714  .addReg(LHS)
715  .addReg(RHS)
716  .addMBB(TailMBB);
717 
718  // IfFalseMBB just falls through to TailMBB.
719  IfFalseMBB->addSuccessor(TailMBB);
720 
721  // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
722  BuildMI(*TailMBB, TailMBB->begin(), DL, TII.get(RISCV::PHI),
723  MI.getOperand(0).getReg())
724  .addReg(MI.getOperand(4).getReg())
725  .addMBB(HeadMBB)
726  .addReg(MI.getOperand(5).getReg())
727  .addMBB(IfFalseMBB);
728 
729  MI.eraseFromParent(); // The pseudo instruction is gone now.
730  return TailMBB;
731 }
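// Sketch of the expansion above for Select_GPR_Using_CC_GPR with CC == SETLT:
//   HeadMBB:    blt lhs, rhs, TailMBB
//   IfFalseMBB: (falls through)
//   TailMBB:    %res = PHI [ %truev, HeadMBB ], [ %falsev, IfFalseMBB ]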
732 
733 // Calling Convention Implementation.
734 // The expectations for frontend ABI lowering vary from target to target.
735 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
736 // details, but this is a longer term goal. For now, we simply try to keep the
737 // role of the frontend as simple and well-defined as possible. The rules can
738 // be summarised as:
739 // * Never split up large scalar arguments. We handle them here.
740 // * If a hardfloat calling convention is being used, and the struct may be
741 // passed in a pair of registers (fp+fp, int+fp), and both registers are
742 // available, then pass as two separate arguments. If either the GPRs or FPRs
743 // are exhausted, then pass according to the rule below.
744 // * If a struct could never be passed in registers or directly in a stack
745 // slot (as it is larger than 2*XLEN and the floating point rules don't
746 // apply), then pass it using a pointer with the byval attribute.
747 // * If a struct is less than 2*XLEN, then coerce to either a two-element
748 // word-sized array or a 2*XLEN scalar (depending on alignment).
749 // * The frontend can determine whether a struct is returned by reference or
750 // not based on its size and fields. If it will be returned by reference, the
751 // frontend must modify the prototype so a pointer with the sret annotation is
752 // passed as the first argument. This is not necessary for large scalar
753 // returns.
754 // * Struct return values and varargs should be coerced to structs containing
755 // register-size fields in the same situations they would be for fixed
756 // arguments.
757 
758 static const MCPhysReg ArgGPRs[] = {
759  RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
760  RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
761 };
762 
763 // Pass a 2*XLEN argument that has been split into two XLEN values through
764 // registers or the stack as necessary.
765 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
766  ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
767  MVT ValVT2, MVT LocVT2,
768  ISD::ArgFlagsTy ArgFlags2) {
769  unsigned XLenInBytes = XLen / 8;
770  if (unsigned Reg = State.AllocateReg(ArgGPRs)) {
771  // At least one half can be passed via register.
772  State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
773  VA1.getLocVT(), CCValAssign::Full));
774  } else {
775  // Both halves must be passed on the stack, with proper alignment.
776  unsigned StackAlign = std::max(XLenInBytes, ArgFlags1.getOrigAlign());
777  State.addLoc(
778  CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
779  State.AllocateStack(XLenInBytes, StackAlign),
780  VA1.getLocVT(), CCValAssign::Full));
781  State.addLoc(CCValAssign::getMem(
782  ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
783  CCValAssign::Full));
784  return false;
785  }
786 
787  if (unsigned Reg = State.AllocateReg(ArgGPRs)) {
788  // The second half can also be passed via register.
789  State.addLoc(
790  CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
791  } else {
792  // The second half is passed via the stack, without additional alignment.
793  State.addLoc(CCValAssign::getMem(
794  ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
795  CCValAssign::Full));
796  }
797 
798  return false;
799 }
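// For example, an i64 argument on RV32 that has been split into two i32
// halves is passed in a GPR pair (e.g. a0/a1) when two registers are free, in
// the last free GPR plus a stack slot when only one remains, or entirely on
// the stack (8-byte aligned) when none are left.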
800 
801 // Implements the RISC-V calling convention. Returns true upon failure.
802 static bool CC_RISCV(const DataLayout &DL, unsigned ValNo, MVT ValVT, MVT LocVT,
803  CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
804  CCState &State, bool IsFixed, bool IsRet, Type *OrigTy) {
805  unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
806  assert(XLen == 32 || XLen == 64);
807  MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
808  if (ValVT == MVT::f32) {
809  LocVT = MVT::i32;
810  LocInfo = CCValAssign::BCvt;
811  }
812 
813  // Any return value split in to more than two values can't be returned
814  // directly.
815  if (IsRet && ValNo > 1)
816  return true;
817 
818  // If this is a variadic argument, the RISC-V calling convention requires
819  // that it is assigned an 'even' or 'aligned' register if it has 8-byte
820  // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
821  // be used regardless of whether the original argument was split during
822  // legalisation or not. The argument will not be passed by registers if the
823  // original type is larger than 2*XLEN, so the register alignment rule does
824  // not apply.
825  unsigned TwoXLenInBytes = (2 * XLen) / 8;
826  if (!IsFixed && ArgFlags.getOrigAlign() == TwoXLenInBytes &&
827  DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
828  unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
829  // Skip 'odd' register if necessary.
830  if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
831  State.AllocateReg(ArgGPRs);
832  }
833 
834  SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
835  SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
836  State.getPendingArgFlags();
837 
838  assert(PendingLocs.size() == PendingArgFlags.size() &&
839  "PendingLocs and PendingArgFlags out of sync");
840 
841  // Handle passing f64 on RV32D with a soft float ABI.
842  if (XLen == 32 && ValVT == MVT::f64) {
843  assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
844  "Can't lower f64 if it is split");
845  // Depending on available argument GPRS, f64 may be passed in a pair of
846  // GPRs, split between a GPR and the stack, or passed completely on the
847  // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
848  // cases.
849  unsigned Reg = State.AllocateReg(ArgGPRs);
850  LocVT = MVT::i32;
851  if (!Reg) {
852  unsigned StackOffset = State.AllocateStack(8, 8);
853  State.addLoc(
854  CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
855  return false;
856  }
857  if (!State.AllocateReg(ArgGPRs))
858  State.AllocateStack(4, 4);
859  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
860  return false;
861  }
862 
863  // Split arguments might be passed indirectly, so keep track of the pending
864  // values.
865  if (ArgFlags.isSplit() || !PendingLocs.empty()) {
866  LocVT = XLenVT;
867  LocInfo = CCValAssign::Indirect;
868  PendingLocs.push_back(
869  CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
870  PendingArgFlags.push_back(ArgFlags);
871  if (!ArgFlags.isSplitEnd()) {
872  return false;
873  }
874  }
875 
876  // If the split argument only had two elements, it should be passed directly
877  // in registers or on the stack.
878  if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
879  assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
880  // Apply the normal calling convention rules to the first half of the
881  // split argument.
882  CCValAssign VA = PendingLocs[0];
883  ISD::ArgFlagsTy AF = PendingArgFlags[0];
884  PendingLocs.clear();
885  PendingArgFlags.clear();
886  return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
887  ArgFlags);
888  }
889 
890  // Allocate to a register if possible, or else a stack slot.
891  unsigned Reg = State.AllocateReg(ArgGPRs);
892  unsigned StackOffset = Reg ? 0 : State.AllocateStack(XLen / 8, XLen / 8);
893 
894  // If we reach this point and PendingLocs is non-empty, we must be at the
895  // end of a split argument that must be passed indirectly.
896  if (!PendingLocs.empty()) {
897  assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
898  assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
899 
900  for (auto &It : PendingLocs) {
901  if (Reg)
902  It.convertToReg(Reg);
903  else
904  It.convertToMem(StackOffset);
905  State.addLoc(It);
906  }
907  PendingLocs.clear();
908  PendingArgFlags.clear();
909  return false;
910  }
911 
912  assert(LocVT == XLenVT && "Expected an XLenVT at this stage");
913 
914  if (Reg) {
915  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
916  return false;
917  }
918 
919  if (ValVT == MVT::f32) {
920  LocVT = MVT::f32;
921  LocInfo = CCValAssign::Full;
922  }
923  State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
924  return false;
925 }
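// For example, on RV32 with the soft-float ABI an f64 argument is passed in a
// pair of GPRs (e.g. a2/a3) when two remain, split between a7 and the stack
// when only a7 is left, or in an 8-byte aligned stack slot once the GPRs are
// exhausted; LowerCall/LowerFormalArguments reassemble the f64 from those
// pieces.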
926 
927 void RISCVTargetLowering::analyzeInputArgs(
928  MachineFunction &MF, CCState &CCInfo,
929  const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
930  unsigned NumArgs = Ins.size();
931  FunctionType *FType = MF.getFunction().getFunctionType();
932 
933  for (unsigned i = 0; i != NumArgs; ++i) {
934  MVT ArgVT = Ins[i].VT;
935  ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
936 
937  Type *ArgTy = nullptr;
938  if (IsRet)
939  ArgTy = FType->getReturnType();
940  else if (Ins[i].isOrigArg())
941  ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
942 
943  if (CC_RISCV(MF.getDataLayout(), i, ArgVT, ArgVT, CCValAssign::Full,
944  ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy)) {
945  LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
946  << EVT(ArgVT).getEVTString() << '\n');
947  llvm_unreachable(nullptr);
948  }
949  }
950 }
951 
952 void RISCVTargetLowering::analyzeOutputArgs(
953  MachineFunction &MF, CCState &CCInfo,
954  const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
955  CallLoweringInfo *CLI) const {
956  unsigned NumArgs = Outs.size();
957 
958  for (unsigned i = 0; i != NumArgs; i++) {
959  MVT ArgVT = Outs[i].VT;
960  ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
961  Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
962 
963  if (CC_RISCV(MF.getDataLayout(), i, ArgVT, ArgVT, CCValAssign::Full,
964  ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) {
965  LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
966  << EVT(ArgVT).getEVTString() << "\n");
967  llvm_unreachable(nullptr);
968  }
969  }
970 }
971 
972 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
973 // values.
974 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
975  const CCValAssign &VA, const SDLoc &DL) {
976  switch (VA.getLocInfo()) {
977  default:
978  llvm_unreachable("Unexpected CCValAssign::LocInfo");
979  case CCValAssign::Full:
980  break;
981  case CCValAssign::BCvt:
982  Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
983  break;
984  }
985  return Val;
986 }
987 
988 // The caller is responsible for loading the full value if the argument is
989 // passed with CCValAssign::Indirect.
990 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
991  const CCValAssign &VA, const SDLoc &DL) {
992  MachineFunction &MF = DAG.getMachineFunction();
993  MachineRegisterInfo &RegInfo = MF.getRegInfo();
994  EVT LocVT = VA.getLocVT();
995  SDValue Val;
996 
997  unsigned VReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
998  RegInfo.addLiveIn(VA.getLocReg(), VReg);
999  Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
1000 
1001  if (VA.getLocInfo() == CCValAssign::Indirect)
1002  return Val;
1003 
1004  return convertLocVTToValVT(DAG, Val, VA, DL);
1005 }
1006 
1007 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
1008  const CCValAssign &VA, const SDLoc &DL) {
1009  EVT LocVT = VA.getLocVT();
1010 
1011  switch (VA.getLocInfo()) {
1012  default:
1013  llvm_unreachable("Unexpected CCValAssign::LocInfo");
1014  case CCValAssign::Full:
1015  break;
1016  case CCValAssign::BCvt:
1017  Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
1018  break;
1019  }
1020  return Val;
1021 }
1022 
1023 // The caller is responsible for loading the full value if the argument is
1024 // passed with CCValAssign::Indirect.
1025 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
1026  const CCValAssign &VA, const SDLoc &DL) {
1027  MachineFunction &MF = DAG.getMachineFunction();
1028  MachineFrameInfo &MFI = MF.getFrameInfo();
1029  EVT LocVT = VA.getLocVT();
1030  EVT ValVT = VA.getValVT();
1032  int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
1033  VA.getLocMemOffset(), /*Immutable=*/true);
1034  SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
1035  SDValue Val;
1036 
1037  ISD::LoadExtType ExtType;
1038  switch (VA.getLocInfo()) {
1039  default:
1040  llvm_unreachable("Unexpected CCValAssign::LocInfo");
1041  case CCValAssign::Full:
1042  case CCValAssign::Indirect:
1043  ExtType = ISD::NON_EXTLOAD;
1044  break;
1045  }
1046  Val = DAG.getExtLoad(
1047  ExtType, DL, LocVT, Chain, FIN,
1048  MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
1049  return Val;
1050 }
1051 
1052 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
1053  const CCValAssign &VA, const SDLoc &DL) {
1054  assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
1055  "Unexpected VA");
1056  MachineFunction &MF = DAG.getMachineFunction();
1057  MachineFrameInfo &MFI = MF.getFrameInfo();
1058  MachineRegisterInfo &RegInfo = MF.getRegInfo();
1059 
1060  if (VA.isMemLoc()) {
1061  // f64 is passed on the stack.
1062  int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
1063  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1064  return DAG.getLoad(MVT::f64, DL, Chain, FIN,
1065  MachinePointerInfo::getFixedStack(MF, FI));
1066  }
1067 
1068  assert(VA.isRegLoc() && "Expected register VA assignment");
1069 
1070  unsigned LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
1071  RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
1072  SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
1073  SDValue Hi;
1074  if (VA.getLocReg() == RISCV::X17) {
1075  // Second half of f64 is passed on the stack.
1076  int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
1077  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1078  Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
1079  MachinePointerInfo::getFixedStack(MF, FI));
1080  } else {
1081  // Second half of f64 is passed in another GPR.
1082  unsigned HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
1083  RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
1084  Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
1085  }
1086  return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
1087 }
1088 
1089 // Transform physical registers into virtual registers.
1090 SDValue RISCVTargetLowering::LowerFormalArguments(
1091  SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
1092  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1093  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1094 
1095  switch (CallConv) {
1096  default:
1097  report_fatal_error("Unsupported calling convention");
1098  case CallingConv::C:
1099  case CallingConv::Fast:
1100  break;
1101  }
1102 
1103  MachineFunction &MF = DAG.getMachineFunction();
1104 
1105  const Function &Func = MF.getFunction();
1106  if (Func.hasFnAttribute("interrupt")) {
1107  if (!Func.arg_empty())
1108  report_fatal_error(
1109  "Functions with the interrupt attribute cannot have arguments!");
1110 
1111  StringRef Kind =
1112  MF.getFunction().getFnAttribute("interrupt").getValueAsString();
1113 
1114  if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
1115  report_fatal_error(
1116  "Function interrupt attribute argument not supported!");
1117  }
1118 
1119  EVT PtrVT = getPointerTy(DAG.getDataLayout());
1120  MVT XLenVT = Subtarget.getXLenVT();
1121  unsigned XLenInBytes = Subtarget.getXLen() / 8;
1122  // Used with vargs to acumulate store chains.
1123  std::vector<SDValue> OutChains;
1124 
1125  // Assign locations to all of the incoming arguments.
1126  SmallVector<CCValAssign, 16> ArgLocs;
1127  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
1128  analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);
1129 
1130  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1131  CCValAssign &VA = ArgLocs[i];
1132  SDValue ArgValue;
1133  // Passing f64 on RV32D with a soft float ABI must be handled as a special
1134  // case.
1135  if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
1136  ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
1137  else if (VA.isRegLoc())
1138  ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL);
1139  else
1140  ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
1141 
1142  if (VA.getLocInfo() == CCValAssign::Indirect) {
1143  // If the original argument was split and passed by reference (e.g. i128
1144  // on RV32), we need to load all parts of it here (using the same
1145  // address).
1146  InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
1147  MachinePointerInfo()));
1148  unsigned ArgIndex = Ins[i].OrigArgIndex;
1149  assert(Ins[i].PartOffset == 0);
1150  while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
1151  CCValAssign &PartVA = ArgLocs[i + 1];
1152  unsigned PartOffset = Ins[i + 1].PartOffset;
1153  SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
1154  DAG.getIntPtrConstant(PartOffset, DL));
1155  InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
1156  MachinePointerInfo()));
1157  ++i;
1158  }
1159  continue;
1160  }
1161  InVals.push_back(ArgValue);
1162  }
1163 
1164  if (IsVarArg) {
1165  ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
1166  unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
1167  const TargetRegisterClass *RC = &RISCV::GPRRegClass;
1168  MachineFrameInfo &MFI = MF.getFrameInfo();
1169  MachineRegisterInfo &RegInfo = MF.getRegInfo();
1170  RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
1171 
1172  // Offset of the first variable argument from stack pointer, and size of
1173  // the vararg save area. For now, the varargs save area is either zero or
1174  // large enough to hold a0-a7.
1175  int VaArgOffset, VarArgsSaveSize;
1176 
1177  // If all registers are allocated, then all varargs must be passed on the
1178  // stack and we don't need to save any argregs.
1179  if (ArgRegs.size() == Idx) {
1180  VaArgOffset = CCInfo.getNextStackOffset();
1181  VarArgsSaveSize = 0;
1182  } else {
1183  VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
1184  VaArgOffset = -VarArgsSaveSize;
1185  }
1186 
1187  // Record the frame index of the first variable argument
1188  // which is a value necessary to VASTART.
1189  int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
1190  RVFI->setVarArgsFrameIndex(FI);
1191 
1192  // If saving an odd number of registers then create an extra stack slot to
1193  // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
1194  // offsets to even-numbered registered remain 2*XLEN-aligned.
1195  if (Idx % 2) {
1196  FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes,
1197  true);
1198  VarArgsSaveSize += XLenInBytes;
1199  }
1200 
1201  // Copy the integer registers that may have been used for passing varargs
1202  // to the vararg save area.
1203  for (unsigned I = Idx; I < ArgRegs.size();
1204  ++I, VaArgOffset += XLenInBytes) {
1205  const unsigned Reg = RegInfo.createVirtualRegister(RC);
1206  RegInfo.addLiveIn(ArgRegs[I], Reg);
1207  SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
1208  FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
1209  SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
1210  SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
1211  MachinePointerInfo::getFixedStack(MF, FI));
1212  cast<StoreSDNode>(Store.getNode())
1213  ->getMemOperand()
1214  ->setValue((Value *)nullptr);
1215  OutChains.push_back(Store);
1216  }
1217  RVFI->setVarArgsSaveSize(VarArgsSaveSize);
1218  }
1219 
1220  // All stores are grouped in one node to allow the matching between
1221  // the size of Ins and InVals. This only happens for vararg functions.
1222  if (!OutChains.empty()) {
1223  OutChains.push_back(Chain);
1224  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
1225  }
1226 
1227  return Chain;
1228 }
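// Sketch of the vararg handling above for an RV32 function f(int n, ...): a0
// carries n, so a1-a7 (28 bytes) are spilled immediately below the incoming
// stack arguments, plus one 4-byte pad slot so the save area stays 8-byte
// aligned; VarArgsFrameIndex records the slot holding a1, from which va_arg
// walks up into any stack-passed varargs.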
1229 
1230 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
1231 /// for tail call optimization.
1232 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
1233 bool RISCVTargetLowering::IsEligibleForTailCallOptimization(
1234  CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
1235  const SmallVector<CCValAssign, 16> &ArgLocs) const {
1236 
1237  auto &Callee = CLI.Callee;
1238  auto CalleeCC = CLI.CallConv;
1239  auto IsVarArg = CLI.IsVarArg;
1240  auto &Outs = CLI.Outs;
1241  auto &Caller = MF.getFunction();
1242  auto CallerCC = Caller.getCallingConv();
1243 
1244  // Do not tail call opt functions with "disable-tail-calls" attribute.
1245  if (Caller.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
1246  return false;
1247 
1248  // Exception-handling functions need a special set of instructions to
1249  // indicate a return to the hardware. Tail-calling another function would
1250  // probably break this.
1251  // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
1252  // should be expanded as new function attributes are introduced.
1253  if (Caller.hasFnAttribute("interrupt"))
1254  return false;
1255 
1256  // Do not tail call opt functions with varargs.
1257  if (IsVarArg)
1258  return false;
1259 
1260  // Do not tail call opt if the stack is used to pass parameters.
1261  if (CCInfo.getNextStackOffset() != 0)
1262  return false;
1263 
1264  // Do not tail call opt if any parameters need to be passed indirectly.
1265  // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
1266  // passed indirectly. So the address of the value will be passed in a
1267  // register, or if not available, then the address is put on the stack. In
1268  // order to pass indirectly, space on the stack often needs to be allocated
1269  // in order to store the value. In this case the CCInfo.getNextStackOffset()
1270  // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs
1271  // are passed CCValAssign::Indirect.
1272  for (auto &VA : ArgLocs)
1273  if (VA.getLocInfo() == CCValAssign::Indirect)
1274  return false;
1275 
1276  // Do not tail call opt if either caller or callee uses struct return
1277  // semantics.
1278  auto IsCallerStructRet = Caller.hasStructRetAttr();
1279  auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
1280  if (IsCallerStructRet || IsCalleeStructRet)
1281  return false;
1282 
1283  // Externally-defined functions with weak linkage should not be
1284  // tail-called. The behaviour of branch instructions in this situation (as
1285  // used for tail calls) is implementation-defined, so we cannot rely on the
1286  // linker replacing the tail call with a return.
1287  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1288  const GlobalValue *GV = G->getGlobal();
1289  if (GV->hasExternalWeakLinkage())
1290  return false;
1291  }
1292 
1293  // The callee has to preserve all registers the caller needs to preserve.
1294  const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
1295  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
1296  if (CalleeCC != CallerCC) {
1297  const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
1298  if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
1299  return false;
1300  }
1301 
1302  // Byval parameters hand the function a pointer directly into the stack area
1303  // we want to reuse during a tail call. Working around this *is* possible
1304  // but less efficient and uglier in LowerCall.
1305  for (auto &Arg : Outs)
1306  if (Arg.Flags.isByVal())
1307  return false;
1308 
1309  return true;
1310 }
1311 
1312 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
1313 // and output parameter nodes.
1314 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
1315  SmallVectorImpl<SDValue> &InVals) const {
1316  SelectionDAG &DAG = CLI.DAG;
1317  SDLoc &DL = CLI.DL;
1318  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1319  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1320  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1321  SDValue Chain = CLI.Chain;
1322  SDValue Callee = CLI.Callee;
1323  bool &IsTailCall = CLI.IsTailCall;
1324  CallingConv::ID CallConv = CLI.CallConv;
1325  bool IsVarArg = CLI.IsVarArg;
1326  EVT PtrVT = getPointerTy(DAG.getDataLayout());
1327  MVT XLenVT = Subtarget.getXLenVT();
1328 
1329  MachineFunction &MF = DAG.getMachineFunction();
1330 
1331  // Analyze the operands of the call, assigning locations to each operand.
1332  SmallVector<CCValAssign, 16> ArgLocs;
1333  CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
1334  analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI);
1335 
1336  // Check if it's really possible to do a tail call.
1337  if (IsTailCall)
1338  IsTailCall = IsEligibleForTailCallOptimization(ArgCCInfo, CLI, MF,
1339  ArgLocs);
1340 
1341  if (IsTailCall)
1342  ++NumTailCalls;
1343  else if (CLI.CS && CLI.CS.isMustTailCall())
1344  report_fatal_error("failed to perform tail call elimination on a call "
1345  "site marked musttail");
1346 
1347  // Get a count of how many bytes are to be pushed on the stack.
1348  unsigned NumBytes = ArgCCInfo.getNextStackOffset();
1349 
1350  // Create local copies for byval args
1351  SmallVector<SDValue, 8> ByValArgs;
1352  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
1353  ISD::ArgFlagsTy Flags = Outs[i].Flags;
1354  if (!Flags.isByVal())
1355  continue;
1356 
1357  SDValue Arg = OutVals[i];
1358  unsigned Size = Flags.getByValSize();
1359  unsigned Align = Flags.getByValAlign();
1360 
1361  int FI = MF.getFrameInfo().CreateStackObject(Size, Align, /*isSS=*/false);
1362  SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
1363  SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
1364 
1365  Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Align,
1366  /*IsVolatile=*/false,
1367  /*AlwaysInline=*/false,
1368  IsTailCall, MachinePointerInfo(),
1369  MachinePointerInfo());
1370  ByValArgs.push_back(FIPtr);
1371  }
1372 
1373  if (!IsTailCall)
1374  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
1375 
1376  // Copy argument values to their designated locations.
1377  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
1378  SmallVector<SDValue, 8> MemOpChains;
1379  SDValue StackPtr;
1380  for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
1381  CCValAssign &VA = ArgLocs[i];
1382  SDValue ArgValue = OutVals[i];
1383  ISD::ArgFlagsTy Flags = Outs[i].Flags;
1384 
1385  // Handle passing f64 on RV32D with a soft float ABI as a special case.
1386  bool IsF64OnRV32DSoftABI =
1387  VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
1388  if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
1389  SDValue SplitF64 = DAG.getNode(
1390  RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
1391  SDValue Lo = SplitF64.getValue(0);
1392  SDValue Hi = SplitF64.getValue(1);
1393 
1394  unsigned RegLo = VA.getLocReg();
1395  RegsToPass.push_back(std::make_pair(RegLo, Lo));
1396 
1397  if (RegLo == RISCV::X17) {
1398  // Second half of f64 is passed on the stack.
1399  // Work out the address of the stack slot.
1400  if (!StackPtr.getNode())
1401  StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
1402  // Emit the store.
1403  MemOpChains.push_back(
1404  DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
1405  } else {
1406  // Second half of f64 is passed in another GPR.
1407  unsigned RegHigh = RegLo + 1;
1408  RegsToPass.push_back(std::make_pair(RegHigh, Hi));
1409  }
1410  continue;
1411  }
1412 
1413  // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
1414  // as any other MemLoc.
1415 
1416  // Promote the value if needed.
1417  // For now, only handle fully promoted and indirect arguments.
1418  if (VA.getLocInfo() == CCValAssign::Indirect) {
1419  // Store the argument in a stack slot and pass its address.
1420  SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT);
1421  int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
1422  MemOpChains.push_back(
1423  DAG.getStore(Chain, DL, ArgValue, SpillSlot,
1424  MachinePointerInfo::getFixedStack(MF, FI)));
1425  // If the original argument was split (e.g. i128), we need
1426  // to store all parts of it here (and pass just one address).
1427  unsigned ArgIndex = Outs[i].OrigArgIndex;
1428  assert(Outs[i].PartOffset == 0);
1429  while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
1430  SDValue PartValue = OutVals[i + 1];
1431  unsigned PartOffset = Outs[i + 1].PartOffset;
1432  SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
1433  DAG.getIntPtrConstant(PartOffset, DL));
1434  MemOpChains.push_back(
1435  DAG.getStore(Chain, DL, PartValue, Address,
1436  MachinePointerInfo::getFixedStack(MF, FI)));
1437  ++i;
1438  }
1439  ArgValue = SpillSlot;
1440  } else {
1441  ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL);
1442  }
1443 
1444  // Use local copy if it is a byval arg.
1445  if (Flags.isByVal())
1446  ArgValue = ByValArgs[j++];
1447 
1448  if (VA.isRegLoc()) {
1449  // Queue up the argument copies and emit them at the end.
1450  RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
1451  } else {
1452  assert(VA.isMemLoc() && "Argument not register or memory");
1453  assert(!IsTailCall && "Tail call not allowed if stack is used "
1454  "for passing parameters");
1455 
1456  // Work out the address of the stack slot.
1457  if (!StackPtr.getNode())
1458  StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
1459  SDValue Address =
1460  DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
1461  DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
1462 
1463  // Emit the store.
1464  MemOpChains.push_back(
1465  DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
1466  }
1467  }
1468 
1469  // Join the stores, which are independent of one another.
1470  if (!MemOpChains.empty())
1471  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
1472 
1473  SDValue Glue;
1474 
1475  // Build a sequence of copy-to-reg nodes, chained and glued together.
1476  for (auto &Reg : RegsToPass) {
1477  Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
1478  Glue = Chain.getValue(1);
1479  }
1480 
1481  // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
1482  // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
1483  // split it and then direct call can be matched by PseudoCALL.
1484  if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
1485  Callee = DAG.getTargetGlobalAddress(S->getGlobal(), DL, PtrVT, 0, 0);
1486  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
1487  Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, 0);
1488  }
1489 
1490  // The first call operand is the chain and the second is the target address.
1491  SmallVector<SDValue, 8> Ops;
1492  Ops.push_back(Chain);
1493  Ops.push_back(Callee);
1494 
1495  // Add argument registers to the end of the list so that they are
1496  // known live into the call.
1497  for (auto &Reg : RegsToPass)
1498  Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
1499 
1500  if (!IsTailCall) {
1501  // Add a register mask operand representing the call-preserved registers.
1502  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
1503  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
1504  assert(Mask && "Missing call preserved mask for calling convention");
1505  Ops.push_back(DAG.getRegisterMask(Mask));
1506  }
1507 
1508  // Glue the call to the argument copies, if any.
1509  if (Glue.getNode())
1510  Ops.push_back(Glue);
1511 
1512  // Emit the call.
1513  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1514 
1515  if (IsTailCall) {
1516  MF.getFrameInfo().setHasTailCall();
1517  return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
1518  }
1519 
1520  Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
1521  Glue = Chain.getValue(1);
1522 
1523  // Mark the end of the call, which is glued to the call itself.
1524  Chain = DAG.getCALLSEQ_END(Chain,
1525  DAG.getConstant(NumBytes, DL, PtrVT, true),
1526  DAG.getConstant(0, DL, PtrVT, true),
1527  Glue, DL);
1528  Glue = Chain.getValue(1);
1529 
1530  // Assign locations to each value returned by this call.
1531  SmallVector<CCValAssign, 16> RVLocs;
1532  CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
1533  analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true);
1534 
1535  // Copy all of the result registers out of their specified physreg.
1536  for (auto &VA : RVLocs) {
1537  // Copy the value out
1538  SDValue RetValue =
1539  DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
1540  // Glue the RetValue to the end of the call sequence
1541  Chain = RetValue.getValue(1);
1542  Glue = RetValue.getValue(2);
1543 
1544  if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
1545  assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
1546  SDValue RetValue2 =
1547  DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
1548  Chain = RetValue2.getValue(1);
1549  Glue = RetValue2.getValue(2);
1550  RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
1551  RetValue2);
1552  }
1553 
1554  RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL);
1555 
1556  InVals.push_back(RetValue);
1557  }
1558 
1559  return Chain;
1560 }
1561 
1562 bool RISCVTargetLowering::CanLowerReturn(
1563  CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
1564  const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
1565  SmallVector<CCValAssign, 16> RVLocs;
1566  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
1567  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
1568  MVT VT = Outs[i].VT;
1569  ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
1570  if (CC_RISCV(MF.getDataLayout(), i, VT, VT, CCValAssign::Full, ArgFlags,
1571  CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr))
1572  return false;
1573  }
1574  return true;
1575 }
1576 
1577 SDValue
1578 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1579  bool IsVarArg,
1580  const SmallVectorImpl<ISD::OutputArg> &Outs,
1581  const SmallVectorImpl<SDValue> &OutVals,
1582  const SDLoc &DL, SelectionDAG &DAG) const {
1583  // Stores the assignment of the return value to a location.
1584  SmallVector<CCValAssign, 16> RVLocs;
1585 
1586  // Info about the registers and stack slot.
1587  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
1588  *DAG.getContext());
1589 
1590  analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
1591  nullptr);
1592 
1593  SDValue Glue;
1594  SmallVector<SDValue, 4> RetOps(1, Chain);
1595 
1596  // Copy the result values into the output registers.
1597  for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
1598  SDValue Val = OutVals[i];
1599  CCValAssign &VA = RVLocs[i];
1600  assert(VA.isRegLoc() && "Can only return in registers!");
1601 
1602  if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
1603  // Handle returning f64 on RV32D with a soft float ABI.
1604  assert(VA.isRegLoc() && "Expected return via registers");
1605  SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
1606  DAG.getVTList(MVT::i32, MVT::i32), Val);
1607  SDValue Lo = SplitF64.getValue(0);
1608  SDValue Hi = SplitF64.getValue(1);
1609  unsigned RegLo = VA.getLocReg();
1610  unsigned RegHi = RegLo + 1;
1611  Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
1612  Glue = Chain.getValue(1);
1613  RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
1614  Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
1615  Glue = Chain.getValue(1);
1616  RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
1617  } else {
1618  // Handle a 'normal' return.
1619  Val = convertValVTToLocVT(DAG, Val, VA, DL);
1620  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
1621 
1622  // Guarantee that all emitted copies are stuck together.
1623  Glue = Chain.getValue(1);
1624  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1625  }
1626  }
1627 
1628  RetOps[0] = Chain; // Update chain.
1629 
1630  // Add the glue node if we have it.
1631  if (Glue.getNode()) {
1632  RetOps.push_back(Glue);
1633  }
1634 
1635  // Interrupt service routines use different return instructions.
1636  const Function &Func = DAG.getMachineFunction().getFunction();
1637  if (Func.hasFnAttribute("interrupt")) {
1638  if (!Func.getReturnType()->isVoidTy())
1639  report_fatal_error(
1640  "Functions with the interrupt attribute must have void return type!");
1641 
1642  MachineFunction &MF = DAG.getMachineFunction();
1643  StringRef Kind =
1644  MF.getFunction().getFnAttribute("interrupt").getValueAsString();
1645 
1646  unsigned RetOpc;
1647  if (Kind == "user")
1648  RetOpc = RISCVISD::URET_FLAG;
1649  else if (Kind == "supervisor")
1650  RetOpc = RISCVISD::SRET_FLAG;
1651  else
1652  RetOpc = RISCVISD::MRET_FLAG;
1653 
1654  return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
1655  }
1656 
1657  return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
1658 }
1659 
1660 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
1661  switch ((RISCVISD::NodeType)Opcode) {
1662  case RISCVISD::FIRST_NUMBER:
1663  break;
1664  case RISCVISD::RET_FLAG:
1665  return "RISCVISD::RET_FLAG";
1666  case RISCVISD::URET_FLAG:
1667  return "RISCVISD::URET_FLAG";
1668  case RISCVISD::SRET_FLAG:
1669  return "RISCVISD::SRET_FLAG";
1670  case RISCVISD::MRET_FLAG:
1671  return "RISCVISD::MRET_FLAG";
1672  case RISCVISD::CALL:
1673  return "RISCVISD::CALL";
1674  case RISCVISD::SELECT_CC:
1675  return "RISCVISD::SELECT_CC";
1676  case RISCVISD::BuildPairF64:
1677  return "RISCVISD::BuildPairF64";
1678  case RISCVISD::SplitF64:
1679  return "RISCVISD::SplitF64";
1680  case RISCVISD::TAIL:
1681  return "RISCVISD::TAIL";
1682  }
1683  return nullptr;
1684 }
1685 
1686 std::pair<unsigned, const TargetRegisterClass *>
1688  StringRef Constraint,
1689  MVT VT) const {
1690  // First, see if this is a constraint that directly corresponds to a
1691  // RISCV register class.
1692  if (Constraint.size() == 1) {
1693  switch (Constraint[0]) {
1694  case 'r':
1695  return std::make_pair(0U, &RISCV::GPRRegClass);
1696  default:
1697  break;
1698  }
1699  }
1700 
1701  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
1702 }
1703 
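The only constraint handled specially below is 'r', which maps to the general-purpose register class. A minimal sketch of user code that exercises this path (illustrative only; add_via_asm is an invented name):

// Each "r" operand is constrained to RISCV::GPRRegClass by
// getRegForInlineAsmConstraint; anything else falls back to the generic
// TargetLowering handling.
int add_via_asm(int a, int b) {
  int out;
  asm("add %0, %1, %2" : "=r"(out) : "r"(a), "r"(b));
  return out;
}
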
1704 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
1705  Instruction *Inst,
1706  AtomicOrdering Ord) const {
1707  if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
1708  return Builder.CreateFence(Ord);
1709  if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
1710  return Builder.CreateFence(AtomicOrdering::Release);
1711  return nullptr;
1712 }
1713 
1714 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
1715  Instruction *Inst,
1716  AtomicOrdering Ord) const {
1717  if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
1718  return Builder.CreateFence(AtomicOrdering::Acquire);
1719  return nullptr;
1720 }
1721 
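Together, emitLeadingFence and emitTrailingFence implement the fence placement AtomicExpandPass requests when atomic loads and stores are lowered with explicit fences. A hedged sketch of what this means for portable C++ code (exact fence operands emitted by the backend are not shown):

#include <atomic>

// Roughly, per the hooks above: a seq_cst load gets a leading seq_cst fence
// and, being acquire-or-stronger, a trailing acquire fence; a release store
// gets only a leading release fence.
int load_seq_cst(const std::atomic<int> &x) {
  return x.load(std::memory_order_seq_cst);
}

void store_release(std::atomic<int> &x, int v) {
  x.store(v, std::memory_order_release);
}
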
1722 TargetLowering::AtomicExpansionKind
1723 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
1724  unsigned Size = AI->getType()->getPrimitiveSizeInBits();
1725  if (Size == 8 || Size == 16)
1726  return AtomicExpansionKind::MaskedIntrinsic;
1727  return AtomicExpansionKind::None;
1728 }
1729 
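shouldExpandAtomicRMWInIR directs AtomicExpandPass to rewrite 8- and 16-bit read-modify-write atomics into the masked 32-bit form emitted by emitMaskedAtomicRMWIntrinsic below. An illustrative example of code that takes this path (names invented):

#include <atomic>
#include <cstdint>

// fetch_add on a uint8_t becomes an 8-bit atomicrmw in IR; because its size
// is 8 bits, it is expanded to the masked word-sized intrinsic rather than
// being selected directly.
uint8_t bump(std::atomic<uint8_t> &counter) {
  return counter.fetch_add(1, std::memory_order_relaxed);
}
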
1730 static Intrinsic::ID
1731 getIntrinsicForMaskedAtomicRMWBinOp32(AtomicRMWInst::BinOp BinOp) {
1732  switch (BinOp) {
1733  default:
1734  llvm_unreachable("Unexpected AtomicRMW BinOp");
1735  case AtomicRMWInst::Xchg:
1736  return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
1737  case AtomicRMWInst::Add:
1738  return Intrinsic::riscv_masked_atomicrmw_add_i32;
1739  case AtomicRMWInst::Sub:
1740  return Intrinsic::riscv_masked_atomicrmw_sub_i32;
1741  case AtomicRMWInst::Nand:
1742  return Intrinsic::riscv_masked_atomicrmw_nand_i32;
1743  case AtomicRMWInst::Max:
1744  return Intrinsic::riscv_masked_atomicrmw_max_i32;
1745  case AtomicRMWInst::Min:
1746  return Intrinsic::riscv_masked_atomicrmw_min_i32;
1747  case AtomicRMWInst::UMax:
1748  return Intrinsic::riscv_masked_atomicrmw_umax_i32;
1749  case AtomicRMWInst::UMin:
1750  return Intrinsic::riscv_masked_atomicrmw_umin_i32;
1751  }
1752 }
1753 
1754 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
1755  IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
1756  Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
1757  Value *Ordering = Builder.getInt32(static_cast<uint32_t>(AI->getOrdering()));
1758  Type *Tys[] = {AlignedAddr->getType()};
1759  Function *LrwOpScwLoop = Intrinsic::getDeclaration(
1760  AI->getModule(),
1761  getIntrinsicForMaskedAtomicRMWBinOp32(AI->getOperation()), Tys);
1762 
1763  // Must pass the shift amount needed to sign extend the loaded value prior
1764  // to performing a signed comparison for min/max. ShiftAmt is the number of
1765  // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
1766  // is the number of bits to left+right shift the value in order to
1767  // sign-extend.
1768  if (AI->getOperation() == AtomicRMWInst::Min ||
1769  AI->getOperation() == AtomicRMWInst::Max) {
1770  const DataLayout &DL = AI->getModule()->getDataLayout();
1771  unsigned ValWidth =
1772  DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
1773  Value *SextShamt = Builder.CreateSub(
1774  Builder.getInt32(Subtarget.getXLen() - ValWidth), ShiftAmt);
1775  return Builder.CreateCall(LrwOpScwLoop,
1776  {AlignedAddr, Incr, Mask, SextShamt, Ordering});
1777  }
1778 
1779  return Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
1780 }
1781 
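The extra operand computed for min/max is simply XLen - ValWidth - ShiftAmt. A worked example with hypothetical numbers, mirroring the comment above:

// Hypothetical RV32 case: an i8 field (ValWidth = 8) sitting at bit 16 of its
// aligned word (ShiftAmt = 16) needs SextShamt = 32 - 8 - 16 = 8, the number
// of bits the value is shifted left and then right to sign-extend it before
// the signed min/max comparison.
static unsigned sextShamtFor(unsigned XLen, unsigned ValWidth,
                             unsigned ShiftAmt) {
  return XLen - ValWidth - ShiftAmt; // mirrors the CreateSub above
}
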
1782 TargetLowering::AtomicExpansionKind
1783 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
1784  AtomicCmpXchgInst *CI) const {
1785  unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
1786  if (Size == 8 || Size == 16)
1787  return AtomicExpansionKind::MaskedIntrinsic;
1788  return AtomicExpansionKind::None;
1789 }
1790 
1791 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
1792  IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
1793  Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
1794  Value *Ordering = Builder.getInt32(static_cast<uint32_t>(Ord));
1795  Type *Tys[] = {AlignedAddr->getType()};
1796  Function *MaskedCmpXchg = Intrinsic::getDeclaration(
1797  CI->getModule(), Intrinsic::riscv_masked_cmpxchg_i32, Tys);
1798  return Builder.CreateCall(MaskedCmpXchg,
1799  {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
1800 }
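
Analogously to the RMW case, shouldExpandAtomicCmpXchgInIR sends sub-word compare-and-swap operations through the masked intrinsic built above. An illustrative example (names invented):

#include <atomic>
#include <cstdint>

// A 16-bit compare_exchange becomes an i16 cmpxchg in IR; being narrower than
// 32 bits, it is expanded into a call to the masked cmpxchg intrinsic with the
// aligned address, expected/new values, mask, and ordering seen above.
bool try_claim(std::atomic<uint16_t> &flag, uint16_t expected,
               uint16_t desired) {
  return flag.compare_exchange_strong(expected, desired);
}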