//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

static cl::opt<bool>
    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
                            cl::desc("Should enable CSE in irtranslator"),
                            cl::Optional, cl::init(false));
char IRTranslator::ID = 0;

INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)

static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    ORE.emit(R);
}

IRTranslator::IRTranslator() : MachineFunctionPass(ID) {
  initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
}

#ifndef NDEBUG
namespace {
/// Verify that every instruction created has the same DILocation as the
/// instruction being translated.
class DILocationVerifier : public GISelChangeObserver {
  const Instruction *CurrInst = nullptr;

public:
  DILocationVerifier() = default;
  ~DILocationVerifier() = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");

    // Only print the check message if we're actually checking it.
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
                      << " was copied to " << MI);
#endif
    assert(CurrInst->getDebugLoc() == MI.getDebugLoc() &&
           "Line info was not transferred to all instructions");
  }
};
} // namespace
#endif // ifndef NDEBUG

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<StackProtector>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<GISelCSEAnalysisWrapperPass>();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

static void computeValueLLTs(const DataLayout &DL, Type &Ty,
                             SmallVectorImpl<LLT> &ValueTys,
                             SmallVectorImpl<uint64_t> *Offsets = nullptr,
                             uint64_t StartingOffset = 0) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(&Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I)
      computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
                       StartingOffset + SL->getElementOffset(I));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                       StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty.isVoidTy())
    return;
  // Base case: we can get an LLT for this LLVM IR type.
  ValueTys.push_back(getLLTForType(Ty, DL));
  if (Offsets != nullptr)
    Offsets->push_back(StartingOffset * 8);
}
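
// Illustrative example (editor's note, not in the original source): on a
// typical 64-bit DataLayout, a value of type {i32, i64} is split into two
// LLTs, s32 and s64, with bit offsets 0 and 64 -- the i64 element is aligned
// to 8 bytes, so StartingOffset picks up the 4 bytes of struct padding.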

ArrayRef<unsigned>
IRTranslator::allocateVRegs(const Value &Val) {
  assert(!VMap.contains(Val) && "Value already allocated in VMap");
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0);
  return *Regs;
}

ArrayRef<unsigned> IRTranslator::getOrCreateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");

  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // UndefValue, ConstantAggregateZero
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      llvm::copy(EltRegs, std::back_inserter(*VRegs));
    }
  } else {
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}

int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  if (FrameIndices.find(&AI) != FrameIndices.end())
    return FrameIndices[&AI];

  unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);

  unsigned Alignment = AI.getAlignment();
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  return FI;
}

unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
  unsigned Alignment = 0;
  Type *ValTy = nullptr;
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    Alignment = SI->getAlignment();
    ValTy = SI->getValueOperand()->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    Alignment = LI->getAlignment();
    ValTy = LI->getType();
  } else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    Alignment = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
    ValTy = AI->getCompareOperand()->getType();
  } else if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    Alignment = DL.getTypeStoreSize(AI->getValOperand()->getType());
    ValTy = AI->getType();
  } else {
    OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
    R << "unable to translate memop: " << ore::NV("Opcode", &I);
    reportTranslationError(*MF, *TPC, *ORE, R);
    return 1;
  }

  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}

MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}

void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}

bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // FIXME: handle signed/unsigned wrapping flags.

  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  auto FBinOp =
      MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
  if (isa<Instruction>(U)) {
    MachineInstr *FBinOpMI = FBinOp.getInstr();
    const Instruction &I = cast<Instruction>(U);
    FBinOpMI->copyIRFlags(I);
  }
  return true;
}

bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
  // -0.0 - X --> G_FNEG
  if (isa<Constant>(U.getOperand(0)) &&
      U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) {
    MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
        .addDef(getOrCreateVReg(U))
        .addUse(getOrCreateVReg(*U.getOperand(1)));
    return true;
  }
  return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
}

bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(1)));
  return true;
}

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  const CmpInst *CI = dyn_cast<CmpInst>(&U);
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(CI->getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(CI->getType())));
  else {
    auto FCmp = MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);
    FCmp->copyIRFlags(*CI);
  }

  return true;
}

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
    Ret = nullptr;

  ArrayRef<unsigned> VRegs;
  if (Ret)
    VRegs = getOrCreateVRegs(*Ret);

  // The target may mess up with the insertion point, but
  // this is not important as a return is the last instruction
  // of the block anyway.

  return CLI->lowerReturn(MIRBuilder, Ret, VRegs);
}

bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
    unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
    MachineBasicBlock &TrueBB = getMBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
  MachineBasicBlock &TgtBB = getMBB(BrTgt);
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();

  // If the unconditional target is the layout successor, fallthrough.
  if (!CurBB.isLayoutSuccessor(&TgtBB))
    MIRBuilder.buildBr(TgtBB);

  // Link successors.
  for (const BasicBlock *Succ : successors(&BrInst))
    CurBB.addSuccessor(&getMBB(*Succ));
  return true;
}
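
// Illustrative example (editor's note, not in the original source): for
//   br i1 %c, label %then, label %else
// this emits, in the current block:
//   G_BRCOND %c(s1), %bb.then
//   G_BR %bb.else        ; omitted when %bb.else is the layout successor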

bool IRTranslator::translateSwitch(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // For now, just translate as a chain of conditional branches.
  // FIXME: could we share most of the logic/code in
  // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel?
  // At first sight, it seems most of the logic in there is independent of
  // SelectionDAG-specifics and a lot of work went in to optimize switch
  // lowering in there.

  const SwitchInst &SwInst = cast<SwitchInst>(U);
  const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
  const BasicBlock *OrigBB = SwInst.getParent();

  LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL);
  for (auto &CaseIt : SwInst.cases()) {
    const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
    const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue);
    MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
    const BasicBlock *TrueBB = CaseIt.getCaseSuccessor();
    MachineBasicBlock &TrueMBB = getMBB(*TrueBB);

    MIRBuilder.buildBrCond(Tst, TrueMBB);
    CurMBB.addSuccessor(&TrueMBB);
    addMachineCFGPred({OrigBB, TrueBB}, &CurMBB);

    MachineBasicBlock *FalseMBB =
        MF->CreateMachineBasicBlock(SwInst.getParent());
    // Insert the comparison blocks one after the other.
    MF->insert(std::next(CurMBB.getIterator()), FalseMBB);
    MIRBuilder.buildBr(*FalseMBB);
    CurMBB.addSuccessor(FalseMBB);

    MIRBuilder.setMBB(*FalseMBB);
  }
  // handle default case
  const BasicBlock *DefaultBB = SwInst.getDefaultDest();
  MachineBasicBlock &DefaultMBB = getMBB(*DefaultBB);
  MIRBuilder.buildBr(DefaultMBB);
  MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
  CurMBB.addSuccessor(&DefaultMBB);
  addMachineCFGPred({OrigBB, DefaultBB}, &CurMBB);

  return true;
}
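
// Illustrative sketch (editor's note, not in the original source): a switch
// with cases 1 and 2 becomes a chain of compare-and-branch blocks:
//   bb.0: %t1 = G_ICMP eq, %case1, %cond ; G_BRCOND %t1, %bb.case1 ; G_BR %bb.1
//   bb.1: %t2 = G_ICMP eq, %case2, %cond ; G_BRCOND %t2, %bb.case2 ; G_BR %bb.2
//   bb.2: G_BR %bb.default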

bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const unsigned Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : successors(&BrInst))
    CurBB.addSuccessor(&getMBB(*Succ));

  return true;
}

bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad;

  if (DL->getTypeStoreSize(LI.getType()) == 0)
    return true;

  ArrayRef<unsigned> Regs = getOrCreateVRegs(LI);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  unsigned Base = getOrCreateVReg(*LI.getPointerOperand());

  for (unsigned i = 0; i < Regs.size(); ++i) {
    unsigned Addr = 0;
    MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);

    MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(LI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        LI.getSyncScopeID(), LI.getOrdering());
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  }

  return true;
}

bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOStore;

  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
    return true;

  ArrayRef<unsigned> Vals = getOrCreateVRegs(*SI.getValueOperand());
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  unsigned Base = getOrCreateVReg(*SI.getPointerOperand());

  for (unsigned i = 0; i < Vals.size(); ++i) {
    unsigned Addr = 0;
    MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);

    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(SI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        SI.getSyncScopeID(), SI.getOrdering());
    MIRBuilder.buildStore(Vals[i], Addr, *MMO);
  }
  return true;
}

static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  SmallVector<Value *, 1> Indices;
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  return 8 * static_cast<uint64_t>(
                 DL.getIndexedOffsetInType(Src->getType(), Indices));
}
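
// Illustrative example (editor's note, not in the original source): for
//   %x = extractvalue {i32, i64} %agg, 1
// this returns 64 on a typical 64-bit DataLayout: the i64 element sits at
// byte offset 8 (after 4 bytes of padding), and the result is in bits.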

bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
  unsigned Idx = std::lower_bound(Offsets.begin(), Offsets.end(), Offset) -
                 Offsets.begin();
  auto &DstRegs = allocateVRegs(U);

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];

  return true;
}

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<unsigned> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
  auto InsertedIt = InsertedRegs.begin();

  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
    else
      DstRegs[i] = SrcRegs[i];
  }

  return true;
}

bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  unsigned Tst = getOrCreateVReg(*U.getOperand(0));
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(U);
  ArrayRef<unsigned> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
  ArrayRef<unsigned> Op1Regs = getOrCreateVRegs(*U.getOperand(2));

  const SelectInst &SI = cast<SelectInst>(U);
  const CmpInst *Cmp = dyn_cast<CmpInst>(SI.getCondition());
  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    auto Select =
        MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i]);
    if (Cmp && isa<FPMathOperator>(Cmp)) {
      Select->copyIRFlags(*Cmp);
    }
  }

  return true;
}

bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // If we're bitcasting to the source type, we can reuse the source vreg.
  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
      getLLTForType(*U.getType(), *DL)) {
    unsigned SrcReg = getOrCreateVReg(*U.getOperand(0));
    auto &Regs = *VMap.getVRegs(U);
    // If we already assigned a vreg for this bitcast, we can't change that.
    // Emit a copy to satisfy the users we already emitted.
    if (!Regs.empty())
      MIRBuilder.buildCopy(Regs[0], SrcReg);
    else {
      Regs.push_back(SrcReg);
      VMap.getOffsets(U)->push_back(0);
    }
    return true;
  }
  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  unsigned Op = getOrCreateVReg(*U.getOperand(0));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
  return true;
}

bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  unsigned BaseReg = getOrCreateVReg(Op0);
  Type *PtrIRTy = Op0.getType();
  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        Offset += ElementSize * CI->getSExtValue();
        continue;
      }

      if (Offset != 0) {
        unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
        unsigned OffsetReg =
            getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
        MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);

        BaseReg = NewBaseReg;
        Offset = 0;
      }

      unsigned IdxReg = getOrCreateVReg(*Idx);
      if (MRI->getType(IdxReg) != OffsetTy) {
        unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
        IdxReg = NewIdxReg;
      }

      // N = N + Idx * ElementSize;
      // Avoid doing it for ElementSize of 1.
      unsigned GepOffsetReg;
      if (ElementSize != 1) {
        unsigned ElementSizeReg =
            getOrCreateVReg(*ConstantInt::get(OffsetIRTy, ElementSize));

        GepOffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildMul(GepOffsetReg, ElementSizeReg, IdxReg);
      } else
        GepOffsetReg = IdxReg;

      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, GepOffsetReg);
      BaseReg = NewBaseReg;
    }
  }

  if (Offset != 0) {
    unsigned OffsetReg =
        getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}
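
// Illustrative example (editor's note, not in the original source): for
//   %p = getelementptr i32, i32* %base, i64 %i
// with a dynamic index, this emits roughly:
//   %idx = G_SEXT/G_TRUNC of %i to the pointer-width type (if needed)
//   %off = G_MUL %size4, %idx      ; element size 4 as a constant
//   %p   = G_GEP %base, %off
// Constant indices are instead folded into a single G_GEP immediate offset.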

bool IRTranslator::translateMemfunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    unsigned ID) {
  LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
  Type *DstTy = CI.getArgOperand(0)->getType();
  if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
      SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
    return false;

  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (int i = 0; i < 3; ++i) {
    const auto &Arg = CI.getArgOperand(i);
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
  }

  const char *Callee;
  switch (ID) {
  case Intrinsic::memmove:
  case Intrinsic::memcpy: {
    Type *SrcTy = CI.getArgOperand(1)->getType();
    if (cast<PointerType>(SrcTy)->getAddressSpace() != 0)
      return false;
    Callee = ID == Intrinsic::memcpy ? "memcpy" : "memmove";
    break;
  }
  case Intrinsic::memset:
    Callee = "memset";
    break;
  default:
    return false;
  }

  return CLI->lowerCall(MIRBuilder, CI.getCallingConv(),
                        MachineOperand::CreateES(Callee),
                        CallLowering::ArgInfo(0, CI.getType()), Args);
}

void IRTranslator::getStackGuard(unsigned DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
  if (!Global)
    return;

  MachinePointerInfo MPInfo(Global);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  MachineMemOperand *MemRef =
      MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                               DL->getPointerABIAlignment(0));
  MIB.setMemRefs({MemRef});
}

bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(CI);
  MIRBuilder.buildInstr(Op)
      .addDef(ResRegs[0])
      .addDef(ResRegs[1])
      .addUse(getOrCreateVReg(*CI.getOperand(0)))
      .addUse(getOrCreateVReg(*CI.getOperand(1)));

  return true;
}
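
// Illustrative example (editor's note, not in the original source): for
//   %res = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
// the struct result is split into two vregs, and this emits:
//   %sum:_(s32), %ovf:_(s1) = G_UADDO %a, %b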

bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {
  switch (ID) {
  default:
    break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
    // Stack coloring is not enabled in O0 (which we care about now) so we can
    // drop these. Make sure someone notices when we start compiling at higher
    // opts though.
    if (MF->getTarget().getOptLevel() != CodeGenOpt::None)
      return false;
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
    assert(DI.getVariable() && "Missing variable");

    const Value *Address = DI.getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      return true;
    }

    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    auto AI = dyn_cast<AllocaInst>(Address);
    if (AI && AI->isStaticAlloca()) {
      // Static allocas are tracked at the MF level, no need for DBG_VALUE
      // instructions (in fact, they get ignored if they *do* exist).
      MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
                             getOrCreateFrameIndex(*AI), DI.getDebugLoc());
    } else {
      // A dbg.declare describes the address of a source variable, so lower it
      // into an indirect DBG_VALUE.
      MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
                                       DI.getVariable(), DI.getExpression());
    }
    return true;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
    assert(DI.getLabel() && "Missing label");

    assert(DI.getLabel()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");

    MIRBuilder.buildDbgLabel(DI.getLabel());
    return true;
  }
  case Intrinsic::vaend:
    // No target I know of cares about va_end. Certainly no in-tree target
    // does. Simplest intrinsic ever!
    return true;
  case Intrinsic::vastart: {
    auto &TLI = *MF->getSubtarget().getTargetLowering();
    Value *Ptr = CI.getArgOperand(0);
    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;

    MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
        .addUse(getOrCreateVReg(*Ptr))
        .addMemOperand(MF->getMachineMemOperand(
            MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 0));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst &DI = cast<DbgValueInst>(CI);
    const Value *V = DI.getValue();
    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
    } else if (const auto *CI = dyn_cast<Constant>(V)) {
      MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
    } else {
      unsigned Reg = getOrCreateVReg(*V);
      // FIXME: This does not handle register-indirect values at offset 0. The
      // direct/indirect thing shouldn't really be handled by something as
      // implicit as reg+noreg vs reg+imm in the first place, but it seems
      // pretty baked in right now.
      MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
    }
    return true;
  }
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::pow: {
    auto Pow = MIRBuilder.buildInstr(TargetOpcode::G_FPOW)
                   .addDef(getOrCreateVReg(CI))
                   .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
                   .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
    Pow->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::exp: {
    auto Exp = MIRBuilder.buildInstr(TargetOpcode::G_FEXP)
                   .addDef(getOrCreateVReg(CI))
                   .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    Exp->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::exp2: {
    auto Exp2 = MIRBuilder.buildInstr(TargetOpcode::G_FEXP2)
                    .addDef(getOrCreateVReg(CI))
                    .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    Exp2->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::log: {
    auto Log = MIRBuilder.buildInstr(TargetOpcode::G_FLOG)
                   .addDef(getOrCreateVReg(CI))
                   .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    Log->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::log2: {
    auto Log2 = MIRBuilder.buildInstr(TargetOpcode::G_FLOG2)
                    .addDef(getOrCreateVReg(CI))
                    .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    Log2->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::log10: {
    auto Log10 = MIRBuilder.buildInstr(TargetOpcode::G_FLOG10)
                     .addDef(getOrCreateVReg(CI))
                     .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    Log10->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::fabs: {
    auto Fabs = MIRBuilder.buildInstr(TargetOpcode::G_FABS)
                    .addDef(getOrCreateVReg(CI))
                    .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    Fabs->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::trunc:
    MIRBuilder.buildInstr(TargetOpcode::G_INTRINSIC_TRUNC)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::round:
    MIRBuilder.buildInstr(TargetOpcode::G_INTRINSIC_ROUND)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::fma: {
    auto FMA = MIRBuilder.buildInstr(TargetOpcode::G_FMA)
                   .addDef(getOrCreateVReg(CI))
                   .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
                   .addUse(getOrCreateVReg(*CI.getArgOperand(1)))
                   .addUse(getOrCreateVReg(*CI.getArgOperand(2)));
    FMA->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::fmuladd: {
    const TargetMachine &TM = MF->getTarget();
    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
    unsigned Dst = getOrCreateVReg(CI);
    unsigned Op0 = getOrCreateVReg(*CI.getArgOperand(0));
    unsigned Op1 = getOrCreateVReg(*CI.getArgOperand(1));
    unsigned Op2 = getOrCreateVReg(*CI.getArgOperand(2));
    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
        TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) {
      // TODO: Revisit this to see if we should move this part of the
      // lowering to the combiner.
      auto FMA =
          MIRBuilder.buildInstr(TargetOpcode::G_FMA, {Dst}, {Op0, Op1, Op2});
      FMA->copyIRFlags(CI);
    } else {
      LLT Ty = getLLTForType(*CI.getType(), *DL);
      auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, {Ty}, {Op0, Op1});
      FMul->copyIRFlags(CI);
      auto FAdd =
          MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Dst}, {FMul, Op2});
      FAdd->copyIRFlags(CI);
    }
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
    return translateMemfunc(CI, MIRBuilder, ID);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    unsigned Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize: {
    // If we don't know by now, we're never going to know.
    const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));

    MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
    return true;
  }
  case Intrinsic::is_constant:
    // If this wasn't constant-folded away by now, then it's not a
    // constant.
    MIRBuilder.buildConstant(getOrCreateVReg(CI), 0);
    return true;
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal, MIRBuilder);

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    int FI = getOrCreateFrameIndex(*Slot);
    MF->getFrameInfo().setStackProtectorIndex(FI);

    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
                                  MachineMemOperand::MOStore |
                                      MachineMemOperand::MOVolatile,
                                  PtrTy.getSizeInBits() / 8, 8));
    return true;
  }
  case Intrinsic::cttz:
  case Intrinsic::ctlz: {
    ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
    bool isTrailing = ID == Intrinsic::cttz;
    unsigned Opcode = isTrailing
                          ? Cst->isZero() ? TargetOpcode::G_CTTZ
                                          : TargetOpcode::G_CTTZ_ZERO_UNDEF
                          : Cst->isZero() ? TargetOpcode::G_CTLZ
                                          : TargetOpcode::G_CTLZ_ZERO_UNDEF;
    MIRBuilder.buildInstr(Opcode)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  }
  case Intrinsic::ctpop: {
    MIRBuilder.buildInstr(TargetOpcode::G_CTPOP)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  }
  case Intrinsic::invariant_start: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    unsigned Undef = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildUndef(Undef);
    return true;
  }
  case Intrinsic::invariant_end:
    return true;
  case Intrinsic::ceil:
    MIRBuilder.buildInstr(TargetOpcode::G_FCEIL)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  }
  return false;
}

bool IRTranslator::translateInlineAsm(const CallInst &CI,
                                      MachineIRBuilder &MIRBuilder) {
  const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
  if (!IA.getConstraintString().empty())
    return false;

  unsigned ExtraInfo = 0;
  if (IA.hasSideEffects())
    ExtraInfo |= InlineAsm::Extra_HasSideEffects;
  if (IA.getDialect() == InlineAsm::AD_Intel)
    ExtraInfo |= InlineAsm::Extra_AsmDialect;

  MIRBuilder.buildInstr(TargetOpcode::INLINEASM)
      .addExternalSymbol(IA.getAsmString().c_str())
      .addImm(ExtraInfo);

  return true;
}

unsigned IRTranslator::packRegs(const Value &V,
                                MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
  LLT BigTy = getLLTForType(*V.getType(), *DL);

  if (Regs.size() == 1)
    return Regs[0];

  unsigned Dst = MRI->createGenericVirtualRegister(BigTy);
  MIRBuilder.buildUndef(Dst);
  for (unsigned i = 0; i < Regs.size(); ++i) {
    unsigned NewDst = MRI->createGenericVirtualRegister(BigTy);
    MIRBuilder.buildInsert(NewDst, Dst, Regs[i], Offsets[i]);
    Dst = NewDst;
  }
  return Dst;
}
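
// Illustrative example (editor's note, not in the original source): packing
// a split {i64, i64} value rebuilds one s128 vreg from its parts:
//   %u   = G_IMPLICIT_DEF           ; s128, from buildUndef
//   %p0  = G_INSERT %u, %lo, 0
//   %dst = G_INSERT %p0, %hi, 64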

void IRTranslator::unpackRegs(const Value &V, unsigned Src,
                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);

  for (unsigned i = 0; i < Regs.size(); ++i)
    MIRBuilder.buildExtract(Regs[i], Src, Offsets[i]);
}

bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MF->getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  // FIXME: support Windows dllimport function calls.
  if (F && F->hasDLLImportStorageClass())
    return false;

  if (CI.isInlineAsm())
    return translateInlineAsm(CI, MIRBuilder);

  Intrinsic::ID ID = Intrinsic::not_intrinsic;
  if (F && F->isIntrinsic()) {
    ID = F->getIntrinsicID();
    if (TII && ID == Intrinsic::not_intrinsic)
      ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
  }

  bool IsSplitType = valueIsSplit(CI);
  if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) {
    unsigned Res = IsSplitType ? MRI->createGenericVirtualRegister(
                                     getLLTForType(*CI.getType(), *DL))
                               : getOrCreateVReg(CI);

    SmallVector<unsigned, 8> Args;
    for (auto &Arg : CI.arg_operands())
      Args.push_back(packRegs(*Arg, MIRBuilder));

    MF->getFrameInfo().setHasCalls(true);
    bool Success = CLI->lowerCall(MIRBuilder, &CI, Res, Args, [&]() {
      return getOrCreateVReg(*CI.getCalledValue());
    });

    if (IsSplitType)
      unpackRegs(CI, Res, MIRBuilder);
    return Success;
  }

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  unsigned Res = 0;
  if (!CI.getType()->isVoidTy()) {
    if (IsSplitType)
      Res =
          MRI->createGenericVirtualRegister(getLLTForType(*CI.getType(), *DL));
    else
      Res = getOrCreateVReg(CI);
  }
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());

  for (auto &Arg : CI.arg_operands()) {
    // Some intrinsics take metadata parameters. Reject them.
    if (isa<MetadataAsValue>(Arg))
      return false;
    MIB.addUse(packRegs(*Arg, MIRBuilder));
  }

  if (IsSplitType)
    unpackRegs(CI, Res, MIRBuilder);

  // Add a MachineMemOperand if it is a target mem intrinsic.
  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
  TargetLowering::IntrinsicInfo Info;
  // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
  if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
    uint64_t Size = Info.memVT.getStoreSize();
    MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
                                               Info.flags, Size, Info.align));
  }

  return true;
}

bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MCContext &Context = MF->getContext();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Value *Callee = I.getCalledValue();
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    return false;

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn && Fn->isIntrinsic())
    return false;

  // FIXME: support whatever these are.
  if (I.hasOperandBundles())
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->front()))
    return false;

  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

  unsigned Res =
      MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL));
  SmallVector<unsigned, 8> Args;
  for (auto &Arg : I.arg_operands())
    Args.push_back(packRegs(*Arg, MIRBuilder));

  if (!CLI->lowerCall(MIRBuilder, &I, Res, Args,
                      [&]() { return getOrCreateVReg(*I.getCalledValue()); }))
    return false;

  unpackRegs(I, Res, MIRBuilder);

  MCSymbol *EndSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

  // FIXME: track probabilities.
  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
                    &ReturnMBB = getMBB(*ReturnBB);
  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
  MIRBuilder.buildBr(ReturnMBB);

  return true;
}

bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If landingpad's return type is token type, we don't create DAG nodes
  // for its exception pointer and selector value. The extraction of exception
  // pointer or selector value from token type landingpads is not currently
  // supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad. Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
      .addSym(MF->addLandingPad(&MBB));

  LLT Ty = getLLTForType(*LP.getType(), *DL);
  unsigned Undef = MRI->createGenericVirtualRegister(Ty);
  MIRBuilder.buildUndef(Undef);

  SmallVector<LLT, 2> Tys;
  for (Type *Ty : cast<StructType>(LP.getType())->elements())
    Tys.push_back(getLLTForType(*Ty, *DL));
  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

  // Mark exception register as live in.
  unsigned ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
  if (!ExceptionReg)
    return false;

  MBB.addLiveIn(ExceptionReg);
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(LP);
  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);

  unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
  if (!SelectorReg)
    return false;

  MBB.addLiveIn(SelectorReg);
  unsigned PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
  MIRBuilder.buildCast(ResRegs[1], PtrVReg);

  return true;
}

bool IRTranslator::translateAlloca(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  auto &AI = cast<AllocaInst>(U);

  if (AI.isSwiftError())
    return false;

  if (AI.isStaticAlloca()) {
    unsigned Res = getOrCreateVReg(AI);
    int FI = getOrCreateFrameIndex(AI);
    MIRBuilder.buildFrameIndex(Res, FI);
    return true;
  }

  // FIXME: support stack probing for Windows.
  if (MF->getTarget().getTargetTriple().isOSWindows())
    return false;

  // Now we're in the harder dynamic case.
  Type *Ty = AI.getAllocatedType();
  unsigned Align =
      std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());

  unsigned NumElts = getOrCreateVReg(*AI.getArraySize());

  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
  if (MRI->getType(NumElts) != IntPtrTy) {
    unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
    MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
    NumElts = ExtElts;
  }

  unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
  unsigned TySize =
      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);

  LLT PtrTy = getLLTForType(*AI.getType(), *DL);
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();

  unsigned SPTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildCopy(SPTmp, SPReg);

  unsigned AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize);

  // Handle alignment. We have to realign if the allocation granule was smaller
  // than stack alignment, or the specific alloca requires more than stack
  // alignment.
  unsigned StackAlign =
      MF->getSubtarget().getFrameLowering()->getStackAlignment();
  Align = std::max(Align, StackAlign);
  if (Align > StackAlign || DL->getTypeAllocSize(Ty) % StackAlign != 0) {
    // Round the size of the allocation up to the stack alignment size
    // by add SA-1 to the size. This doesn't overflow because we're computing
    // an address inside an alloca.
    unsigned AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align));
    AllocTmp = AlignedAlloc;
  }

  MIRBuilder.buildCopy(SPReg, AllocTmp);
  MIRBuilder.buildCopy(getOrCreateVReg(AI), AllocTmp);

  MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());
  return true;
}
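
// Editor's note (not in the original source): TySize above is deliberately
// the *negated* allocation size, so the G_MUL yields a negative byte count
// and the G_GEP moves the stack pointer *down* (stacks grow toward lower
// addresses on the targets this supports); G_PTR_MASK then realigns the
// result to the required boundary.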

bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
  // FIXME: We may need more info about the type. Because of how LLT works,
  // we're completely discarding the i64/double distinction here (amongst
  // others). Fortunately the ABIs I know of where that matters don't use va_arg
  // anyway but that's not guaranteed.
  MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addImm(DL->getABITypeAlignment(U.getType()));
  return true;
}

bool IRTranslator::translateInsertElement(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (U.getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(1));
    auto &Regs = *VMap.getVRegs(U);
    if (Regs.empty()) {
      Regs.push_back(Elt);
      VMap.getOffsets(U)->push_back(0);
    } else {
      MIRBuilder.buildCopy(Regs[0], Elt);
    }
    return true;
  }

  unsigned Res = getOrCreateVReg(U);
  unsigned Val = getOrCreateVReg(*U.getOperand(0));
  unsigned Elt = getOrCreateVReg(*U.getOperand(1));
  unsigned Idx = getOrCreateVReg(*U.getOperand(2));
  MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
  return true;
}

bool IRTranslator::translateExtractElement(const User &U,
                                           MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(0));
    auto &Regs = *VMap.getVRegs(U);
    if (Regs.empty()) {
      Regs.push_back(Elt);
      VMap.getOffsets(U)->push_back(0);
    } else {
      MIRBuilder.buildCopy(Regs[0], Elt);
    }
    return true;
  }
  unsigned Res = getOrCreateVReg(U);
  unsigned Val = getOrCreateVReg(*U.getOperand(0));
  const auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
  unsigned Idx = 0;
  if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
    if (CI->getBitWidth() != PreferredVecIdxWidth) {
      APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);
    }
  }
  if (!Idx)
    Idx = getOrCreateVReg(*U.getOperand(1));
  if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
    const LLT &VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
    Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx)->getOperand(0).getReg();
  }
  MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
  return true;
}
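
// Editor's note (not in the original source): the index normalization above
// keeps G_EXTRACT_VECTOR_ELT indices at the target's preferred width -- e.g.
// an i32 index is sign-extended to s64 on a target whose vector-index type is
// 64 bits wide; constant indices are instead rewritten as new ConstantInts.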

bool IRTranslator::translateShuffleVector(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addUse(getOrCreateVReg(*U.getOperand(1)))
      .addUse(getOrCreateVReg(*U.getOperand(2)));
  return true;
}

bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
  const PHINode &PI = cast<PHINode>(U);

  SmallVector<MachineInstr *, 4> Insts;
  for (auto Reg : getOrCreateVRegs(PI)) {
    auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
    Insts.push_back(MIB.getInstr());
  }

  PendingPHIs.emplace_back(&PI, std::move(Insts));
  return true;
}

bool IRTranslator::translateAtomicCmpXchg(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);

  if (I.isWeak())
    return false;

  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
                              : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  Type *ResType = I.getType();
  Type *ValType = ResType->Type::getStructElementType(0);

  auto Res = getOrCreateVRegs(I);
  unsigned OldValRes = Res[0];
  unsigned SuccessRes = Res[1];
  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
  unsigned Cmp = getOrCreateVReg(*I.getCompareOperand());
  unsigned NewVal = getOrCreateVReg(*I.getNewValOperand());

  MIRBuilder.buildAtomicCmpXchgWithSuccess(
      OldValRes, SuccessRes, Addr, Cmp, NewVal,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(ValType),
                                getMemOpAlignment(I), AAMDNodes(), nullptr,
                                I.getSyncScopeID(), I.getSuccessOrdering(),
                                I.getFailureOrdering()));
  return true;
}

bool IRTranslator::translateAtomicRMW(const User &U,
                                      MachineIRBuilder &MIRBuilder) {
  const AtomicRMWInst &I = cast<AtomicRMWInst>(U);

  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
                              : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  Type *ResType = I.getType();

  unsigned Res = getOrCreateVReg(I);
  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
  unsigned Val = getOrCreateVReg(*I.getValOperand());

  unsigned Opcode = 0;
  switch (I.getOperation()) {
  default:
    llvm_unreachable("Unknown atomicrmw op");
    return false;
  case AtomicRMWInst::Xchg:
    Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
    break;
  case AtomicRMWInst::Add:
    Opcode = TargetOpcode::G_ATOMICRMW_ADD;
    break;
  case AtomicRMWInst::Sub:
    Opcode = TargetOpcode::G_ATOMICRMW_SUB;
    break;
  case AtomicRMWInst::And:
    Opcode = TargetOpcode::G_ATOMICRMW_AND;
    break;
  case AtomicRMWInst::Nand:
    Opcode = TargetOpcode::G_ATOMICRMW_NAND;
    break;
  case AtomicRMWInst::Or:
    Opcode = TargetOpcode::G_ATOMICRMW_OR;
    break;
  case AtomicRMWInst::Xor:
    Opcode = TargetOpcode::G_ATOMICRMW_XOR;
    break;
  case AtomicRMWInst::Max:
    Opcode = TargetOpcode::G_ATOMICRMW_MAX;
    break;
  case AtomicRMWInst::Min:
    Opcode = TargetOpcode::G_ATOMICRMW_MIN;
    break;
  case AtomicRMWInst::UMax:
    Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
    break;
  case AtomicRMWInst::UMin:
    Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
    break;
  }

  MIRBuilder.buildAtomicRMW(
      Opcode, Res, Addr, Val,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(ResType),
                                getMemOpAlignment(I), AAMDNodes(), nullptr,
                                I.getSyncScopeID(), I.getOrdering()));
  return true;
}
1562 
1563 void IRTranslator::finishPendingPhis() {
1564 #ifndef NDEBUG
1565  DILocationVerifier Verifier;
1566  GISelObserverWrapper WrapperObserver(&Verifier);
1567  RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
1568 #endif // ifndef NDEBUG
1569  for (auto &Phi : PendingPHIs) {
1570  const PHINode *PI = Phi.first;
1571  ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
1572  EntryBuilder->setDebugLoc(PI->getDebugLoc());
1573 #ifndef NDEBUG
1574  Verifier.setCurrentInst(PI);
1575 #endif // ifndef NDEBUG
1576 
1577  // All MachineBasicBlocks exist, add them to the PHI. We assume IRTranslator
1578  // won't create extra control flow here, otherwise we need to find the
1579  // dominating predecessor here (or perhaps force the weirder IRTranslators
1580  // to provide a simple boundary).
1581  SmallSet<const BasicBlock *, 4> HandledPreds;
1582 
1583  for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
1584  auto IRPred = PI->getIncomingBlock(i);
1585  if (HandledPreds.count(IRPred))
1586  continue;
1587 
1588  HandledPreds.insert(IRPred);
1589  ArrayRef<unsigned> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
1590  for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
1591  assert(Pred->isSuccessor(ComponentPHIs[0]->getParent()) &&
1592  "incorrect CFG at MachineBasicBlock level");
1593  for (unsigned j = 0; j < ValRegs.size(); ++j) {
1594  MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
1595  MIB.addUse(ValRegs[j]);
1596  MIB.addMBB(Pred);
1597  }
1598  }
1599  }
1600  }
1601 }

bool IRTranslator::valueIsSplit(const Value &V,
                                SmallVectorImpl<uint64_t> *Offsets) {
  SmallVector<LLT, 4> SplitTys;
  if (Offsets && !Offsets->empty())
    Offsets->clear();
  computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
  return SplitTys.size() > 1;
}

bool IRTranslator::translate(const Instruction &Inst) {
  CurBuilder->setDebugLoc(Inst.getDebugLoc());
  EntryBuilder->setDebugLoc(Inst.getDebugLoc());
  switch (Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(Inst, *CurBuilder.get());
#include "llvm/IR/Instruction.def"
  default:
    return false;
  }
}
1624 
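 // Constants are built with EntryBuilder so their G_* definitions land in the
 // entry block and therefore dominate every use in the function.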
1625 bool IRTranslator::translate(const Constant &C, unsigned Reg) {
1626  if (auto CI = dyn_cast<ConstantInt>(&C))
1627  EntryBuilder->buildConstant(Reg, *CI);
1628  else if (auto CF = dyn_cast<ConstantFP>(&C))
1629  EntryBuilder->buildFConstant(Reg, *CF);
1630  else if (isa<UndefValue>(C))
1631  EntryBuilder->buildUndef(Reg);
1632  else if (isa<ConstantPointerNull>(C)) {
1633  // As we are trying to build a constant val of 0 into a pointer,
1634  // insert a cast to make them correct with respect to types.
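  // (Illustrative: an i8* null on a target with 64-bit pointers becomes
  // G_CONSTANT i64 0 followed by the cast built below, i.e. a G_INTTOPTR.)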
1635  unsigned NullSize = DL->getTypeSizeInBits(C.getType());
1636  auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
1637  auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
1638  unsigned ZeroReg = getOrCreateVReg(*ZeroVal);
1639  EntryBuilder->buildCast(Reg, ZeroReg);
1640  } else if (auto GV = dyn_cast<GlobalValue>(&C))
1641  EntryBuilder->buildGlobalValue(Reg, GV);
1642  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
1643  if (!CAZ->getType()->isVectorTy())
1644  return false;
1645  // Return the scalar if it is a <1 x Ty> vector.
1646  if (CAZ->getNumElements() == 1)
1647  return translate(*CAZ->getElementValue(0u), Reg);
1648  SmallVector<unsigned, 4> Ops;
1649  for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
1650  Constant &Elt = *CAZ->getElementValue(i);
1651  Ops.push_back(getOrCreateVReg(Elt));
1652  }
1653  EntryBuilder->buildBuildVector(Reg, Ops);
1654  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
1655  // Return the scalar if it is a <1 x Ty> vector.
1656  if (CV->getNumElements() == 1)
1657  return translate(*CV->getElementAsConstant(0), Reg);
1658  SmallVector<unsigned, 4> Ops;
1659  for (unsigned i = 0; i < CV->getNumElements(); ++i) {
1660  Constant &Elt = *CV->getElementAsConstant(i);
1661  Ops.push_back(getOrCreateVReg(Elt));
1662  }
1663  EntryBuilder->buildBuildVector(Reg, Ops);
1664  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
1665  switch(CE->getOpcode()) {
1666 #define HANDLE_INST(NUM, OPCODE, CLASS) \
1667  case Instruction::OPCODE: \
1668  return translate##OPCODE(*CE, *EntryBuilder.get());
1669 #include "llvm/IR/Instruction.def"
1670  default:
1671  return false;
1672  }
1673  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
1674  if (CV->getNumOperands() == 1)
1675  return translate(*CV->getOperand(0), Reg);
1676  SmallVector<unsigned, 4> Ops;
1677  for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
1678  Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
1679  }
1680  EntryBuilder->buildBuildVector(Reg, Ops);
1681  } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
1682  EntryBuilder->buildBlockAddress(Reg, BA);
1683  } else
1684  return false;
1685 
1686  return true;
1687 }
1688 
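 // Runs from the scope_exit guard installed in runOnMachineFunction, so this
 // per-function state is released on both the success and failure paths.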
1689 void IRTranslator::finalizeFunction() {
1690  // Release the memory used by the different maps we
1691  // needed during the translation.
1692  PendingPHIs.clear();
1693  VMap.reset();
1694  FrameIndices.clear();
1695  MachinePreds.clear();
1696  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
1697  // to avoid accessing free'd memory (in runOnMachineFunction) and to avoid
1698  // destroying it twice (in ~IRTranslator() and ~LLVMContext())
1699  EntryBuilder.reset();
1700  CurBuilder.reset();
1701 }
1702 
1703 bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
1704  MF = &CurMF;
1705  const Function &F = MF->getFunction();
1706  if (F.empty())
1707  return false;
1708  GISelCSEAnalysisWrapper &Wrapper =
1709  getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
1710  // Set the CSEConfig and run the analysis.
1711  GISelCSEInfo *CSEInfo = nullptr;
1712  TPC = &getAnalysis<TargetPassConfig>();
1713  bool IsO0 = TPC->getOptLevel() == CodeGenOpt::Level::None;
1714  // Disable CSE for O0.
1715  bool EnableCSE = !IsO0 && EnableCSEInIRTranslator;
1716  if (EnableCSE) {
1717  EntryBuilder = make_unique<CSEMIRBuilder>(CurMF);
1718  std::unique_ptr<CSEConfig> Config = make_unique<CSEConfig>();
1719  CSEInfo = &Wrapper.get(std::move(Config));
1720  EntryBuilder->setCSEInfo(CSEInfo);
1721  CurBuilder = make_unique<CSEMIRBuilder>(CurMF);
1722  CurBuilder->setCSEInfo(CSEInfo);
1723  } else {
1724  EntryBuilder = make_unique<MachineIRBuilder>();
1725  CurBuilder = make_unique<MachineIRBuilder>();
1726  }
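  // When CSE is enabled, EntryBuilder and CurBuilder share one GISelCSEInfo,
  // so equivalent instructions built in the entry block and during per-block
  // translation are commoned consistently.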
1727  CLI = MF->getSubtarget().getCallLowering();
1728  CurBuilder->setMF(*MF);
1729  EntryBuilder->setMF(*MF);
1730  MRI = &MF->getRegInfo();
1731  DL = &F.getParent()->getDataLayout();
1732  ORE = llvm::make_unique<OptimizationRemarkEmitter>(&F);
1733 
1734  assert(PendingPHIs.empty() && "stale PHIs");
1735 
1736  if (!DL->isLittleEndian()) {
1737  // Currently we don't properly handle big endian code.
1738  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1739  F.getSubprogram(), &F.getEntryBlock());
1740  R << "unable to translate in big endian mode";
1741  reportTranslationError(*MF, *TPC, *ORE, R);
1742  }
1743 
1744  // Release the per-function state when we return, whether we succeeded or not.
1745  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
1746 
1747  // Setup a separate basic-block for the arguments and constants
1748  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
1749  MF->push_back(EntryBB);
1750  EntryBuilder->setMBB(*EntryBB);
1751 
1752  // Create all blocks, in IR order, to preserve the layout.
1753  for (const BasicBlock &BB: F) {
1754  auto *&MBB = BBToMBB[&BB];
1755 
1756  MBB = MF->CreateMachineBasicBlock(&BB);
1757  MF->push_back(MBB);
1758 
1759  if (BB.hasAddressTaken())
1760  MBB->setHasAddressTaken();
1761  }
1762 
1763  // Make our arguments/constants entry block fallthrough to the IR entry block.
1764  EntryBB->addSuccessor(&getMBB(F.front()));
1765 
1766  // Lower the actual args into this basic block.
1767  SmallVector<unsigned, 8> VRegArgs;
1768  for (const Argument &Arg: F.args()) {
1769  if (DL->getTypeStoreSize(Arg.getType()) == 0)
1770  continue; // Don't handle zero sized types.
1771  VRegArgs.push_back(
1772  MRI->createGenericVirtualRegister(getLLTForType(*Arg.getType(), *DL)));
1773  }
1774 
1775  // We don't currently support translating swifterror or swiftself functions.
1776  for (auto &Arg : F.args()) {
1777  if (Arg.hasSwiftErrorAttr() || Arg.hasSwiftSelfAttr()) {
1778  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1779  F.getSubprogram(), &F.getEntryBlock());
1780  R << "unable to lower arguments due to swifterror/swiftself: "
1781  << ore::NV("Prototype", F.getType());
1782  reportTranslationError(*MF, *TPC, *ORE, R);
1783  return false;
1784  }
1785  }
1786 
1787  if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs)) {
1788  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1789  F.getSubprogram(), &F.getEntryBlock());
1790  R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
1791  reportTranslationError(*MF, *TPC, *ORE, R);
1792  return false;
1793  }
1794 
1795  auto ArgIt = F.arg_begin();
1796  for (auto &VArg : VRegArgs) {
1797  // If the argument is an unsplit scalar then don't use unpackRegs to avoid
1798  // creating redundant copies.
1799  if (!valueIsSplit(*ArgIt, VMap.getOffsets(*ArgIt))) {
1800  auto &VRegs = *VMap.getVRegs(cast<Value>(*ArgIt));
1801  assert(VRegs.empty() && "VRegs already populated?");
1802  VRegs.push_back(VArg);
1803  } else {
1804  unpackRegs(*ArgIt, VArg, *EntryBuilder.get());
1805  }
1806  ArgIt++;
1807  }
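  // Every IR argument is now mapped in VMap, either directly to its single
  // lowered register or to the per-part registers created by unpackRegs.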
1808 
1809  // Need to visit defs before uses when translating instructions.
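 // A reverse post-order traversal visits each block before its successors
 // (back edges aside), which guarantees that for non-PHI values; PHIs are
 // handled separately in finishPendingPhis.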
1810  GISelObserverWrapper WrapperObserver;
1811  if (EnableCSE && CSEInfo)
1812  WrapperObserver.addObserver(CSEInfo);
1813  {
1814  ReversePostOrderTraversal<const Function *> RPOT(&F);
1815 #ifndef NDEBUG
1816  DILocationVerifier Verifier;
1817  WrapperObserver.addObserver(&Verifier);
1818 #endif // ifndef NDEBUG
1819  RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
1820  for (const BasicBlock *BB : RPOT) {
1821  MachineBasicBlock &MBB = getMBB(*BB);
1822  // Set the insertion point of all the following translations to
1823  // the end of this basic block.
1824  CurBuilder->setMBB(MBB);
1825 
1826  for (const Instruction &Inst : *BB) {
1827 #ifndef NDEBUG
1828  Verifier.setCurrentInst(&Inst);
1829 #endif // ifndef NDEBUG
1830  if (translate(Inst))
1831  continue;
1832 
1833  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1834  Inst.getDebugLoc(), BB);
1835  R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
1836 
1837  if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
1838  std::string InstStrStorage;
1839  raw_string_ostream InstStr(InstStrStorage);
1840  InstStr << Inst;
1841 
1842  R << ": '" << InstStr.str() << "'";
1843  }
1844 
1845  reportTranslationError(*MF, *TPC, *ORE, R);
1846  return false;
1847  }
1848  }
1849 #ifndef NDEBUG
1850  WrapperObserver.removeObserver(&Verifier);
1851 #endif
1852  }
1853 
1854  finishPendingPhis();
1855 
1856  // Merge the argument lowering and constants block with its single
1857  // successor, the LLVM-IR entry block. We want the basic block to
1858  // be maximal.
1859  assert(EntryBB->succ_size() == 1 &&
1860  "Custom BB used for lowering should have only one successor");
1861  // Get the successor of the current entry block.
1862  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
1863  assert(NewEntryBB.pred_size() == 1 &&
1864  "LLVM-IR entry block has a predecessor!?");
1865  // Move all the instruction from the current entry block to the
1866  // new entry block.
1867  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
1868  EntryBB->end());
1869 
1870  // Update the live-in information for the new entry block.
1871  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
1872  NewEntryBB.addLiveIn(LiveIn);
1873  NewEntryBB.sortUniqueLiveIns();
1874 
1875  // Get rid of the now empty basic block.
1876  EntryBB->removeSuccessor(&NewEntryBB);
1877  MF->remove(EntryBB);
1878  MF->DeleteMachineBasicBlock(EntryBB);
1879 
1880  assert(&MF->front() == &NewEntryBB &&
1881  "New entry wasn't next in the list of basic block!");
1882 
1883  // Initialize stack protector information.
1884  StackProtector &SP = getAnalysis<StackProtector>();
1885  SP.copyToMachineFrameInfo(MF->getFrameInfo());
1886 
1887  return false;
1888 }