LLVM  8.0.1
MachineIRBuilder.cpp
Go to the documentation of this file.
1 //===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 /// \file
10 /// This file implements the MachineIRBuilder class.
11 //===----------------------------------------------------------------------===//
14 
22 #include "llvm/IR/DebugInfo.h"
23 
24 using namespace llvm;
25 
27  State.MF = &MF;
28  State.MBB = nullptr;
29  State.MRI = &MF.getRegInfo();
30  State.TII = MF.getSubtarget().getInstrInfo();
31  State.DL = DebugLoc();
33  State.Observer = nullptr;
34 }
35 
37  State.MBB = &MBB;
38  State.II = MBB.end();
39  assert(&getMF() == MBB.getParent() &&
40  "Basic block is in a different function");
41 }
42 
44  assert(MI.getParent() && "Instruction is not part of a basic block");
45  setMBB(*MI.getParent());
46  State.II = MI.getIterator();
47 }
48 
50 
53  assert(MBB.getParent() == &getMF() &&
54  "Basic block is in a different function");
55  State.MBB = &MBB;
56  State.II = II;
57 }
58 
59 void MachineIRBuilder::recordInsertion(MachineInstr *InsertedInstr) const {
60  if (State.Observer)
61  State.Observer->createdInstr(*InsertedInstr);
62 }
63 
65  State.Observer = &Observer;
66 }
67 
69 
70 //------------------------------------------------------------------------------
71 // Build instruction variants.
72 //------------------------------------------------------------------------------
73 
75  return insertInstr(buildInstrNoInsert(Opcode));
76 }
77 
79  MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode));
80  return MIB;
81 }
82 
84  getMBB().insert(getInsertPt(), MIB);
85  recordInsertion(MIB);
86  return MIB;
87 }
88 
91  const MDNode *Expr) {
92  assert(isa<DILocalVariable>(Variable) && "not a variable");
93  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
94  assert(
95  cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
96  "Expected inlined-at fields to agree");
97  return insertInstr(BuildMI(getMF(), getDL(),
98  getTII().get(TargetOpcode::DBG_VALUE),
99  /*IsIndirect*/ false, Reg, Variable, Expr));
100 }
101 
104  const MDNode *Expr) {
105  assert(isa<DILocalVariable>(Variable) && "not a variable");
106  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
107  assert(
108  cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
109  "Expected inlined-at fields to agree");
110  return insertInstr(BuildMI(getMF(), getDL(),
111  getTII().get(TargetOpcode::DBG_VALUE),
112  /*IsIndirect*/ true, Reg, Variable, Expr));
113 }
114 
116  const MDNode *Variable,
117  const MDNode *Expr) {
118  assert(isa<DILocalVariable>(Variable) && "not a variable");
119  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
120  assert(
121  cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
122  "Expected inlined-at fields to agree");
123  return buildInstr(TargetOpcode::DBG_VALUE)
124  .addFrameIndex(FI)
125  .addImm(0)
126  .addMetadata(Variable)
127  .addMetadata(Expr);
128 }
129 
131  const MDNode *Variable,
132  const MDNode *Expr) {
133  assert(isa<DILocalVariable>(Variable) && "not a variable");
134  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
135  assert(
136  cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
137  "Expected inlined-at fields to agree");
138  auto MIB = buildInstr(TargetOpcode::DBG_VALUE);
139  if (auto *CI = dyn_cast<ConstantInt>(&C)) {
140  if (CI->getBitWidth() > 64)
141  MIB.addCImm(CI);
142  else
143  MIB.addImm(CI->getZExtValue());
144  } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) {
145  MIB.addFPImm(CFP);
146  } else {
147  // Insert %noreg if we didn't find a usable constant and had to drop it.
148  MIB.addReg(0U);
149  }
150 
151  return MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
152 }
153 
155  assert(isa<DILabel>(Label) && "not a label");
156  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
157  "Expected inlined-at fields to agree");
158  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);
159 
160  return MIB.addMetadata(Label);
161 }
162 
164  assert(getMRI()->getType(Res).isPointer() && "invalid operand type");
165  return buildInstr(TargetOpcode::G_FRAME_INDEX)
166  .addDef(Res)
167  .addFrameIndex(Idx);
168 }
169 
171  const GlobalValue *GV) {
172  assert(getMRI()->getType(Res).isPointer() && "invalid operand type");
173  assert(getMRI()->getType(Res).getAddressSpace() ==
174  GV->getType()->getAddressSpace() &&
175  "address space mismatch");
176 
177  return buildInstr(TargetOpcode::G_GLOBAL_VALUE)
178  .addDef(Res)
179  .addGlobalAddress(GV);
180 }
181 
// Sanity-check the operand types of a generic binary operation (G_ADD, G_AND,
// G_MUL, ...): the result must be a scalar or a vector, and both source
// operands must have exactly the result type. Invoked from buildInstr() for
// the binary-op opcodes; compiles to nothing in NDEBUG builds.
void MachineIRBuilder::validateBinaryOp(const LLT &Res, const LLT &Op0,
                                        const LLT &Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}
187 
189  unsigned Op1) {
190  assert(getMRI()->getType(Res).isPointer() &&
191  getMRI()->getType(Res) == getMRI()->getType(Op0) && "type mismatch");
192  assert(getMRI()->getType(Op1).isScalar() && "invalid offset type");
193 
194  return buildInstr(TargetOpcode::G_GEP)
195  .addDef(Res)
196  .addUse(Op0)
197  .addUse(Op1);
198 }
199 
201 MachineIRBuilder::materializeGEP(unsigned &Res, unsigned Op0,
202  const LLT &ValueTy, uint64_t Value) {
203  assert(Res == 0 && "Res is a result argument");
204  assert(ValueTy.isScalar() && "invalid offset type");
205 
206  if (Value == 0) {
207  Res = Op0;
208  return None;
209  }
210 
212  unsigned TmpReg = getMRI()->createGenericVirtualRegister(ValueTy);
213 
214  buildConstant(TmpReg, Value);
215  return buildGEP(Res, Op0, TmpReg);
216 }
217 
219  uint32_t NumBits) {
220  assert(getMRI()->getType(Res).isPointer() &&
221  getMRI()->getType(Res) == getMRI()->getType(Op0) && "type mismatch");
222 
223  return buildInstr(TargetOpcode::G_PTR_MASK)
224  .addDef(Res)
225  .addUse(Op0)
226  .addImm(NumBits);
227 }
228 
230  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
231 }
232 
234  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
235  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
236 }
237 
239  const SrcOp &Op) {
240  return buildInstr(TargetOpcode::COPY, Res, Op);
241 }
242 
244  const ConstantInt &Val) {
245  LLT Ty = Res.getLLTTy(*getMRI());
246 
247  assert((Ty.isScalar() || Ty.isPointer()) && "invalid operand type");
248 
249  const ConstantInt *NewVal = &Val;
250  if (Ty.getSizeInBits() != Val.getBitWidth())
252  Val.getValue().sextOrTrunc(Ty.getSizeInBits()));
253 
254  auto MIB = buildInstr(TargetOpcode::G_CONSTANT);
255  Res.addDefToMIB(*getMRI(), MIB);
256  MIB.addCImm(NewVal);
257  return MIB;
258 }
259 
261  int64_t Val) {
262  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
263  Res.getLLTTy(*getMRI()).getSizeInBits());
264  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
265  return buildConstant(Res, *CI);
266 }
267 
269  const ConstantFP &Val) {
270  assert(Res.getLLTTy(*getMRI()).isScalar() && "invalid operand type");
271 
272  auto MIB = buildInstr(TargetOpcode::G_FCONSTANT);
273  Res.addDefToMIB(*getMRI(), MIB);
274  MIB.addFPImm(&Val);
275  return MIB;
276 }
277 
279  double Val) {
280  LLT DstTy = Res.getLLTTy(*getMRI());
281  auto &Ctx = getMF().getFunction().getContext();
282  auto *CFP =
284  return buildFConstant(Res, *CFP);
285 }
286 
288  MachineBasicBlock &Dest) {
289  assert(getMRI()->getType(Tst).isScalar() && "invalid operand type");
290 
291  return buildInstr(TargetOpcode::G_BRCOND).addUse(Tst).addMBB(&Dest);
292 }
293 
295  MachineMemOperand &MMO) {
296  return buildLoadInstr(TargetOpcode::G_LOAD, Res, Addr, MMO);
297 }
298 
300  unsigned Res,
301  unsigned Addr,
302  MachineMemOperand &MMO) {
303  assert(getMRI()->getType(Res).isValid() && "invalid operand type");
304  assert(getMRI()->getType(Addr).isPointer() && "invalid operand type");
305 
306  return buildInstr(Opcode)
307  .addDef(Res)
308  .addUse(Addr)
309  .addMemOperand(&MMO);
310 }
311 
313  MachineMemOperand &MMO) {
314  assert(getMRI()->getType(Val).isValid() && "invalid operand type");
315  assert(getMRI()->getType(Addr).isPointer() && "invalid operand type");
316 
317  return buildInstr(TargetOpcode::G_STORE)
318  .addUse(Val)
319  .addUse(Addr)
320  .addMemOperand(&MMO);
321 }
322 
324  const DstOp &CarryOut,
325  const SrcOp &Op0,
326  const SrcOp &Op1,
327  const SrcOp &CarryIn) {
328  return buildInstr(TargetOpcode::G_UADDE, {Res, CarryOut},
329  {Op0, Op1, CarryIn});
330 }
331 
333  const SrcOp &Op) {
334  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
335 }
336 
338  const SrcOp &Op) {
339  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
340 }
341 
343  const SrcOp &Op) {
344  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
345 }
346 
348  const DstOp &Res,
349  const SrcOp &Op) {
350  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
351  TargetOpcode::G_SEXT == ExtOpc) &&
352  "Expecting Extending Opc");
353  assert(Res.getLLTTy(*getMRI()).isScalar() ||
354  Res.getLLTTy(*getMRI()).isVector());
355  assert(Res.getLLTTy(*getMRI()).isScalar() ==
356  Op.getLLTTy(*getMRI()).isScalar());
357 
358  unsigned Opcode = TargetOpcode::COPY;
359  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
360  Op.getLLTTy(*getMRI()).getSizeInBits())
361  Opcode = ExtOpc;
362  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
363  Op.getLLTTy(*getMRI()).getSizeInBits())
364  Opcode = TargetOpcode::G_TRUNC;
365  else
366  assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));
367 
368  return buildInstr(Opcode, Res, Op);
369 }
370 
372  const SrcOp &Op) {
373  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
374 }
375 
377  const SrcOp &Op) {
378  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
379 }
380 
382  const SrcOp &Op) {
383  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
384 }
385 
387  const SrcOp &Src) {
388  LLT SrcTy = Src.getLLTTy(*getMRI());
389  LLT DstTy = Dst.getLLTTy(*getMRI());
390  if (SrcTy == DstTy)
391  return buildCopy(Dst, Src);
392 
393  unsigned Opcode;
394  if (SrcTy.isPointer() && DstTy.isScalar())
395  Opcode = TargetOpcode::G_PTRTOINT;
396  else if (DstTy.isPointer() && SrcTy.isScalar())
397  Opcode = TargetOpcode::G_INTTOPTR;
398  else {
399  assert(!SrcTy.isPointer() && !DstTy.isPointer() && "n G_ADDRCAST yet");
400  Opcode = TargetOpcode::G_BITCAST;
401  }
402 
403  return buildInstr(Opcode, Dst, Src);
404 }
405 
407  uint64_t Index) {
408 #ifndef NDEBUG
409  assert(getMRI()->getType(Src).isValid() && "invalid operand type");
410  assert(getMRI()->getType(Res).isValid() && "invalid operand type");
411  assert(Index + getMRI()->getType(Res).getSizeInBits() <=
412  getMRI()->getType(Src).getSizeInBits() &&
413  "extracting off end of register");
414 #endif
415 
416  if (getMRI()->getType(Res).getSizeInBits() ==
417  getMRI()->getType(Src).getSizeInBits()) {
418  assert(Index == 0 && "insertion past the end of a register");
419  return buildCast(Res, Src);
420  }
421 
422  return buildInstr(TargetOpcode::G_EXTRACT)
423  .addDef(Res)
424  .addUse(Src)
425  .addImm(Index);
426 }
427 
429  ArrayRef<uint64_t> Indices) {
430 #ifndef NDEBUG
431  assert(Ops.size() == Indices.size() && "incompatible args");
432  assert(!Ops.empty() && "invalid trivial sequence");
433  assert(std::is_sorted(Indices.begin(), Indices.end()) &&
434  "sequence offsets must be in ascending order");
435 
436  assert(getMRI()->getType(Res).isValid() && "invalid operand type");
437  for (auto Op : Ops)
438  assert(getMRI()->getType(Op).isValid() && "invalid operand type");
439 #endif
440 
441  LLT ResTy = getMRI()->getType(Res);
442  LLT OpTy = getMRI()->getType(Ops[0]);
443  unsigned OpSize = OpTy.getSizeInBits();
444  bool MaybeMerge = true;
445  for (unsigned i = 0; i < Ops.size(); ++i) {
446  if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {
447  MaybeMerge = false;
448  break;
449  }
450  }
451 
452  if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) {
453  buildMerge(Res, Ops);
454  return;
455  }
456 
457  unsigned ResIn = getMRI()->createGenericVirtualRegister(ResTy);
458  buildUndef(ResIn);
459 
460  for (unsigned i = 0; i < Ops.size(); ++i) {
461  unsigned ResOut = i + 1 == Ops.size()
462  ? Res
464  buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
465  ResIn = ResOut;
466  }
467 }
468 
470  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
471 }
472 
474  ArrayRef<unsigned> Ops) {
475  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>,
476  // we need some temporary storage for the DstOp objects. Here we use a
477  // sufficiently large SmallVector to not go through the heap.
478  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
479  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
480 }
481 
483  const SrcOp &Op) {
484  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
485  // we need some temporary storage for the DstOp objects. Here we use a
486  // sufficiently large SmallVector to not go through the heap.
487  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
488  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
489 }
490 
492  const SrcOp &Op) {
493  // Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<DstOp>,
494  // we need some temporary storage for the DstOp objects. Here we use a
495  // sufficiently large SmallVector to not go through the heap.
496  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
497  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
498 }
499 
501  ArrayRef<unsigned> Ops) {
502  // Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<SrcOp>,
503  // we need some temporary storage for the DstOp objects. Here we use a
504  // sufficiently large SmallVector to not go through the heap.
505  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
506  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
507 }
508 
511  ArrayRef<unsigned> Ops) {
512  // Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<SrcOp>,
513  // we need some temporary storage for the DstOp objects. Here we use a
514  // sufficiently large SmallVector to not go through the heap.
515  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
516  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
517 }
518 
521  // Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<SrcOp>,
522  // we need some temporary storage for the DstOp objects. Here we use a
523  // sufficiently large SmallVector to not go through the heap.
524  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
525  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
526 }
527 
529  unsigned Op, unsigned Index) {
530  assert(Index + getMRI()->getType(Op).getSizeInBits() <=
531  getMRI()->getType(Res).getSizeInBits() &&
532  "insertion past the end of a register");
533 
534  if (getMRI()->getType(Res).getSizeInBits() ==
535  getMRI()->getType(Op).getSizeInBits()) {
536  return buildCast(Res, Op);
537  }
538 
539  return buildInstr(TargetOpcode::G_INSERT)
540  .addDef(Res)
541  .addUse(Src)
542  .addUse(Op)
543  .addImm(Index);
544 }
545 
547  unsigned Res,
548  bool HasSideEffects) {
549  auto MIB =
550  buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
551  : TargetOpcode::G_INTRINSIC);
552  if (Res)
553  MIB.addDef(Res);
554  MIB.addIntrinsicID(ID);
555  return MIB;
556 }
557 
559  const SrcOp &Op) {
560  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
561 }
562 
564  const SrcOp &Op) {
565  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op);
566 }
567 
569  const DstOp &Res,
570  const SrcOp &Op0,
571  const SrcOp &Op1) {
572  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
573 }
574 
576  const DstOp &Res,
577  const SrcOp &Op0,
578  const SrcOp &Op1) {
579 
580  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1});
581 }
582 
584  const SrcOp &Tst,
585  const SrcOp &Op0,
586  const SrcOp &Op1) {
587 
588  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1});
589 }
590 
593  const SrcOp &Elt, const SrcOp &Idx) {
594  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
595 }
596 
599  const SrcOp &Idx) {
600  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
601 }
602 
604  unsigned OldValRes, unsigned SuccessRes, unsigned Addr, unsigned CmpVal,
605  unsigned NewVal, MachineMemOperand &MMO) {
606 #ifndef NDEBUG
607  LLT OldValResTy = getMRI()->getType(OldValRes);
608  LLT SuccessResTy = getMRI()->getType(SuccessRes);
609  LLT AddrTy = getMRI()->getType(Addr);
610  LLT CmpValTy = getMRI()->getType(CmpVal);
611  LLT NewValTy = getMRI()->getType(NewVal);
612  assert(OldValResTy.isScalar() && "invalid operand type");
613  assert(SuccessResTy.isScalar() && "invalid operand type");
614  assert(AddrTy.isPointer() && "invalid operand type");
615  assert(CmpValTy.isValid() && "invalid operand type");
616  assert(NewValTy.isValid() && "invalid operand type");
617  assert(OldValResTy == CmpValTy && "type mismatch");
618  assert(OldValResTy == NewValTy && "type mismatch");
619 #endif
620 
621  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
622  .addDef(OldValRes)
623  .addDef(SuccessRes)
624  .addUse(Addr)
625  .addUse(CmpVal)
626  .addUse(NewVal)
627  .addMemOperand(&MMO);
628 }
629 
631 MachineIRBuilder::buildAtomicCmpXchg(unsigned OldValRes, unsigned Addr,
632  unsigned CmpVal, unsigned NewVal,
633  MachineMemOperand &MMO) {
634 #ifndef NDEBUG
635  LLT OldValResTy = getMRI()->getType(OldValRes);
636  LLT AddrTy = getMRI()->getType(Addr);
637  LLT CmpValTy = getMRI()->getType(CmpVal);
638  LLT NewValTy = getMRI()->getType(NewVal);
639  assert(OldValResTy.isScalar() && "invalid operand type");
640  assert(AddrTy.isPointer() && "invalid operand type");
641  assert(CmpValTy.isValid() && "invalid operand type");
642  assert(NewValTy.isValid() && "invalid operand type");
643  assert(OldValResTy == CmpValTy && "type mismatch");
644  assert(OldValResTy == NewValTy && "type mismatch");
645 #endif
646 
647  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
648  .addDef(OldValRes)
649  .addUse(Addr)
650  .addUse(CmpVal)
651  .addUse(NewVal)
652  .addMemOperand(&MMO);
653 }
654 
656  unsigned OldValRes,
657  unsigned Addr,
658  unsigned Val,
659  MachineMemOperand &MMO) {
660 #ifndef NDEBUG
661  LLT OldValResTy = getMRI()->getType(OldValRes);
662  LLT AddrTy = getMRI()->getType(Addr);
663  LLT ValTy = getMRI()->getType(Val);
664  assert(OldValResTy.isScalar() && "invalid operand type");
665  assert(AddrTy.isPointer() && "invalid operand type");
666  assert(ValTy.isValid() && "invalid operand type");
667  assert(OldValResTy == ValTy && "type mismatch");
668 #endif
669 
670  return buildInstr(Opcode)
671  .addDef(OldValRes)
672  .addUse(Addr)
673  .addUse(Val)
674  .addMemOperand(&MMO);
675 }
676 
678 MachineIRBuilder::buildAtomicRMWXchg(unsigned OldValRes, unsigned Addr,
679  unsigned Val, MachineMemOperand &MMO) {
680  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
681  MMO);
682 }
684 MachineIRBuilder::buildAtomicRMWAdd(unsigned OldValRes, unsigned Addr,
685  unsigned Val, MachineMemOperand &MMO) {
686  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
687  MMO);
688 }
690 MachineIRBuilder::buildAtomicRMWSub(unsigned OldValRes, unsigned Addr,
691  unsigned Val, MachineMemOperand &MMO) {
692  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
693  MMO);
694 }
696 MachineIRBuilder::buildAtomicRMWAnd(unsigned OldValRes, unsigned Addr,
697  unsigned Val, MachineMemOperand &MMO) {
698  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
699  MMO);
700 }
702 MachineIRBuilder::buildAtomicRMWNand(unsigned OldValRes, unsigned Addr,
703  unsigned Val, MachineMemOperand &MMO) {
704  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
705  MMO);
706 }
708  unsigned Addr,
709  unsigned Val,
710  MachineMemOperand &MMO) {
711  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
712  MMO);
713 }
715 MachineIRBuilder::buildAtomicRMWXor(unsigned OldValRes, unsigned Addr,
716  unsigned Val, MachineMemOperand &MMO) {
717  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
718  MMO);
719 }
721 MachineIRBuilder::buildAtomicRMWMax(unsigned OldValRes, unsigned Addr,
722  unsigned Val, MachineMemOperand &MMO) {
723  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
724  MMO);
725 }
727 MachineIRBuilder::buildAtomicRMWMin(unsigned OldValRes, unsigned Addr,
728  unsigned Val, MachineMemOperand &MMO) {
729  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
730  MMO);
731 }
733 MachineIRBuilder::buildAtomicRMWUmax(unsigned OldValRes, unsigned Addr,
734  unsigned Val, MachineMemOperand &MMO) {
735  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
736  MMO);
737 }
739 MachineIRBuilder::buildAtomicRMWUmin(unsigned OldValRes, unsigned Addr,
740  unsigned Val, MachineMemOperand &MMO) {
741  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
742  MMO);
743 }
744 
747 #ifndef NDEBUG
748  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
749 #endif
750 
751  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
752 }
753 
// Sanity-check a truncate/extend operation (G_TRUNC, G_FPTRUNC, G_SEXT,
// G_ZEXT, G_ANYEXT). Vector-to-vector casts must preserve the element count;
// otherwise both types must be scalars. An extend (\p IsExtend true) must
// strictly widen the bit width, and a trunc must strictly narrow it — the
// equal-size case is expected to be emitted as a COPY instead (see
// buildExtOrTrunc). All checks compile away in NDEBUG builds.
void MachineIRBuilder::validateTruncExt(const LLT &DstTy, const LLT &SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
           "invalid narrowing extend");
  else
    assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
           "invalid widening trunc");
#endif
}
772 
// Sanity-check a G_SELECT: the result may be scalar, vector, or pointer, and
// both value operands must match the result type exactly. The test
// (condition) operand must be a scalar for scalar/pointer selects; for vector
// selects it may be either a single scalar (one condition for the whole
// vector) or a vector of conditions with one element per result lane. All
// checks compile away in NDEBUG builds.
void MachineIRBuilder::validateSelectOp(const LLT &ResTy, const LLT &TstTy,
                                        const LLT &Op0Ty, const LLT &Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getNumElements() == Op0Ty.getNumElements())) &&
           "type mismatch");
#endif
}
788 
790  ArrayRef<DstOp> DstOps,
791  ArrayRef<SrcOp> SrcOps,
792  Optional<unsigned> Flags) {
793  switch (Opc) {
794  default:
795  break;
796  case TargetOpcode::G_SELECT: {
797  assert(DstOps.size() == 1 && "Invalid select");
798  assert(SrcOps.size() == 3 && "Invalid select");
800  DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
801  SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
802  break;
803  }
804  case TargetOpcode::G_ADD:
805  case TargetOpcode::G_AND:
806  case TargetOpcode::G_ASHR:
807  case TargetOpcode::G_LSHR:
808  case TargetOpcode::G_MUL:
809  case TargetOpcode::G_OR:
810  case TargetOpcode::G_SHL:
811  case TargetOpcode::G_SUB:
812  case TargetOpcode::G_XOR:
813  case TargetOpcode::G_UDIV:
814  case TargetOpcode::G_SDIV:
815  case TargetOpcode::G_UREM:
816  case TargetOpcode::G_SREM: {
817  // All these are binary ops.
818  assert(DstOps.size() == 1 && "Invalid Dst");
819  assert(SrcOps.size() == 2 && "Invalid Srcs");
820  validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
821  SrcOps[0].getLLTTy(*getMRI()),
822  SrcOps[1].getLLTTy(*getMRI()));
823  break;
824  case TargetOpcode::G_SEXT:
825  case TargetOpcode::G_ZEXT:
826  case TargetOpcode::G_ANYEXT:
827  assert(DstOps.size() == 1 && "Invalid Dst");
828  assert(SrcOps.size() == 1 && "Invalid Srcs");
829  validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
830  SrcOps[0].getLLTTy(*getMRI()), true);
831  break;
832  case TargetOpcode::G_TRUNC:
833  case TargetOpcode::G_FPTRUNC:
834  assert(DstOps.size() == 1 && "Invalid Dst");
835  assert(SrcOps.size() == 1 && "Invalid Srcs");
836  validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
837  SrcOps[0].getLLTTy(*getMRI()), false);
838  break;
839  }
840  case TargetOpcode::COPY:
841  assert(DstOps.size() == 1 && "Invalid Dst");
842  assert(SrcOps.size() == 1 && "Invalid Srcs");
843  assert(DstOps[0].getLLTTy(*getMRI()) == LLT() ||
844  SrcOps[0].getLLTTy(*getMRI()) == LLT() ||
845  DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI()));
846  break;
847  case TargetOpcode::G_FCMP:
848  case TargetOpcode::G_ICMP: {
849  assert(DstOps.size() == 1 && "Invalid Dst Operands");
850  assert(SrcOps.size() == 3 && "Invalid Src Operands");
851  // For F/ICMP, the first src operand is the predicate, followed by
852  // the two comparands.
853  assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
854  "Expecting predicate");
855  assert([&]() -> bool {
856  CmpInst::Predicate Pred = SrcOps[0].getPredicate();
857  return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
858  : CmpInst::isFPPredicate(Pred);
859  }() && "Invalid predicate");
860  assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
861  "Type mismatch");
862  assert([&]() -> bool {
863  LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
864  LLT DstTy = DstOps[0].getLLTTy(*getMRI());
865  if (Op0Ty.isScalar() || Op0Ty.isPointer())
866  return DstTy.isScalar();
867  else
868  return DstTy.isVector() &&
869  DstTy.getNumElements() == Op0Ty.getNumElements();
870  }() && "Type Mismatch");
871  break;
872  }
873  case TargetOpcode::G_UNMERGE_VALUES: {
874  assert(!DstOps.empty() && "Invalid trivial sequence");
875  assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
876  assert(std::all_of(DstOps.begin(), DstOps.end(),
877  [&, this](const DstOp &Op) {
878  return Op.getLLTTy(*getMRI()) ==
879  DstOps[0].getLLTTy(*getMRI());
880  }) &&
881  "type mismatch in output list");
882  assert(DstOps.size() * DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
883  SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
884  "input operands do not cover output register");
885  break;
886  }
887  case TargetOpcode::G_MERGE_VALUES: {
888  assert(!SrcOps.empty() && "invalid trivial sequence");
889  assert(DstOps.size() == 1 && "Invalid Dst");
890  assert(std::all_of(SrcOps.begin(), SrcOps.end(),
891  [&, this](const SrcOp &Op) {
892  return Op.getLLTTy(*getMRI()) ==
893  SrcOps[0].getLLTTy(*getMRI());
894  }) &&
895  "type mismatch in input list");
896  assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
897  DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
898  "input operands do not cover output register");
899  if (SrcOps.size() == 1)
900  return buildCast(DstOps[0], SrcOps[0]);
901  if (DstOps[0].getLLTTy(*getMRI()).isVector())
902  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps);
903  break;
904  }
905  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
906  assert(DstOps.size() == 1 && "Invalid Dst size");
907  assert(SrcOps.size() == 2 && "Invalid Src size");
908  assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
909  assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
910  DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
911  "Invalid operand type");
912  assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
913  assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
914  DstOps[0].getLLTTy(*getMRI()) &&
915  "Type mismatch");
916  break;
917  }
918  case TargetOpcode::G_INSERT_VECTOR_ELT: {
919  assert(DstOps.size() == 1 && "Invalid dst size");
920  assert(SrcOps.size() == 3 && "Invalid src size");
921  assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
922  SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
923  assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
924  SrcOps[1].getLLTTy(*getMRI()) &&
925  "Type mismatch");
926  assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
927  assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
928  SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
929  "Type mismatch");
930  break;
931  }
932  case TargetOpcode::G_BUILD_VECTOR: {
933  assert((!SrcOps.empty() || SrcOps.size() < 2) &&
934  "Must have at least 2 operands");
935  assert(DstOps.size() == 1 && "Invalid DstOps");
936  assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
937  "Res type must be a vector");
938  assert(std::all_of(SrcOps.begin(), SrcOps.end(),
939  [&, this](const SrcOp &Op) {
940  return Op.getLLTTy(*getMRI()) ==
941  SrcOps[0].getLLTTy(*getMRI());
942  }) &&
943  "type mismatch in input list");
944  assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
945  DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
946  "input scalars do not exactly cover the outpur vector register");
947  break;
948  }
949  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
950  assert((!SrcOps.empty() || SrcOps.size() < 2) &&
951  "Must have at least 2 operands");
952  assert(DstOps.size() == 1 && "Invalid DstOps");
953  assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
954  "Res type must be a vector");
955  assert(std::all_of(SrcOps.begin(), SrcOps.end(),
956  [&, this](const SrcOp &Op) {
957  return Op.getLLTTy(*getMRI()) ==
958  SrcOps[0].getLLTTy(*getMRI());
959  }) &&
960  "type mismatch in input list");
961  if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
962  DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits())
963  return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
964  break;
965  }
966  case TargetOpcode::G_CONCAT_VECTORS: {
967  assert(DstOps.size() == 1 && "Invalid DstOps");
968  assert((!SrcOps.empty() || SrcOps.size() < 2) &&
969  "Must have at least 2 operands");
970  assert(std::all_of(SrcOps.begin(), SrcOps.end(),
971  [&, this](const SrcOp &Op) {
972  return (Op.getLLTTy(*getMRI()).isVector() &&
973  Op.getLLTTy(*getMRI()) ==
974  SrcOps[0].getLLTTy(*getMRI()));
975  }) &&
976  "type mismatch in input list");
977  assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
978  DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
979  "input vectors do not exactly cover the outpur vector register");
980  break;
981  }
982  case TargetOpcode::G_UADDE: {
983  assert(DstOps.size() == 2 && "Invalid no of dst operands");
984  assert(SrcOps.size() == 3 && "Invalid no of src operands");
985  assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
986  assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
987  (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
988  "Invalid operand");
989  assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
990  assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
991  "type mismatch");
992  break;
993  }
994  }
995 
996  auto MIB = buildInstr(Opc);
997  for (const DstOp &Op : DstOps)
998  Op.addDefToMIB(*getMRI(), MIB);
999  for (const SrcOp &Op : SrcOps)
1000  Op.addSrcToMIB(MIB);
1001  if (Flags)
1002  MIB->setFlags(*Flags);
1003  return MIB;
1004 }
bool isFPPredicate() const
Definition: InstrTypes.h:738
uint64_t CallInst * C
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
void addDefToMIB(MachineRegisterInfo &MRI, MachineInstrBuilder &MIB) const
The CSE Analysis object.
Definition: CSEInfo.h:69
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ...
MachineInstrBuilder buildGEP(unsigned Res, unsigned Op0, unsigned Op1)
Build and insert Res = G_GEP Op0, Op1.
This class represents lattice values for constants.
Definition: AllocatorList.h:24
MachineInstrBuilder buildIndirectDbgValue(unsigned Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
iterator begin() const
Definition: ArrayRef.h:137
bool isScalar() const
MachineInstrBuilder buildAtomicRMWSub(unsigned OldValRes, unsigned Addr, unsigned Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_SUB Addr, Val, MMO.
GISelChangeObserver * Observer
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
unsigned Reg
MachineInstrBuilder buildAtomicRMWXor(unsigned OldValRes, unsigned Addr, unsigned Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XOR Addr, Val, MMO.
LLT getType(unsigned Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register...
MachineInstrBuilder buildConcatVectors(const DstOp &Res, ArrayRef< unsigned > Ops)
Build and insert Res = G_CONCAT_VECTORS Op0, ...
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1186
A debug info location.
Definition: DebugLoc.h:34
Metadata node.
Definition: Metadata.h:864
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned char TargetFlags=0) const
MachineInstrBuilder buildExtract(unsigned Res, unsigned Src, uint64_t Index)
Build and insert `Res0, ...
void validateSelectOp(const LLT &ResTy, const LLT &TstTy, const LLT &Op0Ty, const LLT &Op1Ty)
unsigned getBitWidth() const
getBitWidth - Return the bitwidth of this constant.
Definition: Constants.h:143
MachineInstrBuilder buildAtomicRMWNand(unsigned OldValRes, unsigned Addr, unsigned Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_NAND Addr, Val, MMO.
MachineInstrBuilder buildStore(unsigned Val, unsigned Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
bool isVector() const
void setMF(MachineFunction &MF)
The address of a basic block.
Definition: Constants.h:840
MachineInstrBuilder buildBlockAddress(unsigned Res, const BlockAddress *BA)
Build and insert Res = G_BLOCK_ADDR BA.
A description of a memory reference used in the backend.
void setInsertPt(MachineBasicBlock &MBB, MachineBasicBlock::iterator II)
Set the insertion point before the specified position.
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op0.
MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, const DstOp &Res, const SrcOp &Op)
Build and insert Res = ExtOpc, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of...
MachineInstrBuilder buildUAdde(const DstOp &Res, const DstOp &CarryOut, const SrcOp &Op0, const SrcOp &Op1, const SrcOp &CarryIn)
Build and insert Res, CarryOut = G_UADDE Op0, Op1, CarryIn.
const MachineInstrBuilder & addUse(unsigned RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
void validateTruncExt(const LLT &Dst, const LLT &Src, bool IsExtend)
MachineInstrBuilder buildAnyExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Res = COPY Op depending on the differing sizes of Res and Op.
MachineBasicBlock::iterator II
void recordInsertion(MachineInstr *MI) const
APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition: Utils.cpp:225
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition: APInt.cpp:884
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, unsigned OldValRes, unsigned Addr, unsigned Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
void validateBinaryOp(const LLT &Res, const LLT &Op0, const LLT &Op1)
MachineFunction & getMF()
Getter for the function we currently build.
MachineInstrBuilder buildAtomicRMWUmin(unsigned OldValRes, unsigned Addr, unsigned Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMIN Addr, Val, MMO.
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition: Constants.h:138
virtual const TargetInstrInfo * getInstrInfo() const
MachineInstrBuilder buildAtomicRMWUmax(unsigned OldValRes, unsigned Addr, unsigned Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMAX Addr, Val, MMO.
static Function * getFunction(Constant *C)
Definition: Evaluator.cpp:221
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
Analysis containing CSE Info
Definition: CSEInfo.cpp:21
void setChangeObserver(GISelChangeObserver &Observer)
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instructions specifying that Label is given.
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
MachineInstrBundleIterator< MachineInstr > iterator
MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op.
MachineRegisterInfo * getMRI()
Getter for MRI.
Abstract class that contains various methods for clients to notify about changes. ...
MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_FPTRUNC Op.
const MachineInstrBuilder & addBlockAddress(const BlockAddress *BA, int64_t Offset=0, unsigned char TargetFlags=0) const
const TargetInstrInfo * TII
Information used to access the description of the opcodes.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:149
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op.
This is an important base class in LLVM.
Definition: Constant.h:42
MachineInstrBuilder buildPtrMask(unsigned Res, unsigned Op0, uint32_t NumBits)
Build and insert Res = G_PTR_MASK Op0, NumBits.
virtual void createdInstr(MachineInstr &MI)=0
An instruction was created and inserted into the function.
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:264
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, unsigned Res, bool HasSideEffects)
Build and insert either a G_INTRINSIC (if HasSideEffects is false) or G_INTRINSIC_W_SIDE_EFFECTS inst...
MachineInstrBuilder buildAtomicRMWAdd(unsigned OldValRes, unsigned Addr, unsigned Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_ADD Addr, Val, MMO.
void setInstr(MachineInstr &MI)
Set the insertion point to before MI.
bool isValid() const
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:646
MachineInstrBuilder buildInsert(unsigned Res, unsigned Src, unsigned Op, unsigned Index)
MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in th...
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:495
DebugLoc DL
Debug location to be set to any instruction we create.
self_iterator getIterator()
Definition: ilist_node.h:82
const MachineInstrBuilder & addFrameIndex(int Idx) const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
Definition: Function.cpp:193
MachineInstrBuilder buildBrIndirect(unsigned Tgt)
Build and insert G_BRINDIRECT Tgt.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_TRUNC Op.
static wasm::ValType getType(const TargetRegisterClass *RC)
MachineInstrBuilder buildLoadInstr(unsigned Opcode, unsigned Res, unsigned Addr, MachineMemOperand &MMO)
Build and insert Res = <opcode> Addr, MMO.
MachineInstrBuilder buildFrameIndex(unsigned Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
MachineInstrBuilder buildAtomicRMWXchg(unsigned OldValRes, unsigned Addr, unsigned Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XCHG Addr, Val, MMO.
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
unsigned createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instructions specifying that Variable is given by C (suitably modified b...
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:240
MachineInstrBuilder buildMerge(const DstOp &Res, ArrayRef< unsigned > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ...
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
This is the shared class of boolean and integer constants.
Definition: Constants.h:84
virtual MachineInstrBuilder buildFConstant(const DstOp &Res, const ConstantFP &Val)
Build and insert Res = G_FCONSTANT Val.
void buildSequence(unsigned Res, ArrayRef< unsigned > Ops, ArrayRef< uint64_t > Indices)
Build and insert instructions to put Ops together at the specified p Indices to form a larger registe...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:847
bool isVector(MCInstrInfo const &MCII, MCInst const &MCI)
iterator end() const
Definition: ArrayRef.h:138
unsigned getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
MachineInstrBuilder buildBrCond(unsigned Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
const TargetInstrInfo & getTII()
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:622
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
LegalityPredicate isScalar(unsigned TypeIdx)
True iff the specified type index is a scalar.
static Constant * get(Type *Ty, double V)
This returns a ConstantFP, or a vector containing a splat of a ConstantFP, for the specified value in...
Definition: Constants.cpp:685
LLT getLLTTy(const MachineRegisterInfo &MRI) const
const Function & getFunction() const
Return the LLVM function that this machine code represents.
MachineInstrBuilder buildAtomicRMWAnd(unsigned OldValRes, unsigned Addr, unsigned Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_AND Addr, Val, MMO.
void setCSEInfo(GISelCSEInfo *Info)
This file declares the MachineIRBuilder class.
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
bool isIntPredicate() const
Definition: InstrTypes.h:739
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(unsigned OldValRes, unsigned SuccessRes, unsigned Addr, unsigned CmpVal, unsigned NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, CmpVal, NewVal, MMO.
LLT getLLTTy(const MachineRegisterInfo &MRI) const
bool isPointer() const
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:254
Representation of each machine instruction.
Definition: MachineInstr.h:64
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBuilder buildDirectDbgValue(unsigned Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Re...
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_FCMP Pred, Op0, Op1.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< unsigned > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ...
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
void setMBB(MachineBasicBlock &MBB)
Set the insertion point to the end of MBB.
MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res, ArrayRef< unsigned > Ops)
Build and insert Res = G_BUILD_VECTOR_TRUNC Op0, ...
Optional< MachineInstrBuilder > materializeGEP(unsigned &Res, unsigned Op0, const LLT &ValueTy, uint64_t Value)
Materialize and insert Res = G_GEP Op0, (G_CONSTANT Value)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
MachineInstrBuilder insertInstr(MachineInstrBuilder MIB)
Insert an existing instruction at the insertion point.
LLVM Value Representation.
Definition: Value.h:73
unsigned getSizeInBits(unsigned Reg, const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI) const
Get the size in bits of Reg.
uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
MachineInstrBuilder buildAtomicRMWMin(unsigned OldValRes, unsigned Addr, unsigned Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MIN Addr, Val, MMO.
MachineInstrBuilder buildLoad(unsigned Res, unsigned Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildAtomicCmpXchg(unsigned OldValRes, unsigned Addr, unsigned CmpVal, unsigned NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal, MMO.
IRTranslator LLVM IR MI
const MachineInstrBuilder & addDef(unsigned RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
MachineFunction * MF
MachineFunction under construction.
MachineInstrBuilder buildAtomicRMWOr(unsigned OldValRes, unsigned Addr, unsigned Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_OR Addr, Val, MMO.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
MachineInstrBuilder buildGlobalValue(unsigned Res, const GlobalValue *GV)
Build and insert Res = G_GLOBAL_VALUE GV.
const DebugLoc & getDL()
Getter for DebugLoc.
MachineInstrBuilder buildAtomicRMWMax(unsigned OldValRes, unsigned Addr, unsigned Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MAX Addr, Val, MMO.
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:274
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:144
MachineRegisterInfo * MRI
Information used to verify types are consistent and to create virtual registers.