//===- AArch64InstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AArch64.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64RegisterBankInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "aarch64-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class AArch64InstructionSelector : public InstructionSelector {
public:
  AArch64InstructionSelector(const AArch64TargetMachine &TM,
                             const AArch64Subtarget &STI,
                             const AArch64RegisterBankInfo &RBI);

  bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  bool selectVaStartAAPCS(MachineInstr &I, MachineFunction &MF,
                          MachineRegisterInfo &MRI) const;
  bool selectVaStartDarwin(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;

  bool selectCompareBranch(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;

  // Helper to generate an equivalent of scalar_to_vector into a new register,
  // returned via 'Dst'.
  bool emitScalarToVector(unsigned &Dst, const LLT DstTy,
                          const TargetRegisterClass *DstRC, unsigned Scalar,
                          MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MBBI,
                          MachineRegisterInfo &MRI) const;
  bool selectBuildVector(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI) const;

  ComplexRendererFns selectArithImmed(MachineOperand &Root) const;

  ComplexRendererFns selectAddrModeUnscaled(MachineOperand &Root,
                                            unsigned Size) const;

  ComplexRendererFns selectAddrModeUnscaled8(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 1);
  }
  ComplexRendererFns selectAddrModeUnscaled16(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 2);
  }
  ComplexRendererFns selectAddrModeUnscaled32(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 4);
  }
  ComplexRendererFns selectAddrModeUnscaled64(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 8);
  }
  ComplexRendererFns selectAddrModeUnscaled128(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 16);
  }

  ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root,
                                           unsigned Size) const;
  template <int Width>
  ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root) const {
    return selectAddrModeIndexed(Root, Width / 8);
  }

  void renderTruncImm(MachineInstrBuilder &MIB, const MachineInstr &MI) const;

  // Materialize a GlobalValue or BlockAddress using a movz+movk sequence.
  void materializeLargeCMVal(MachineInstr &I, const Value *V,
                             unsigned char OpFlags) const;

  const AArch64TargetMachine &TM;
  const AArch64Subtarget &STI;
  const AArch64InstrInfo &TII;
  const AArch64RegisterInfo &TRI;
  const AArch64RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

// We declare the temporaries used by selectImpl() in the class to minimize the
// cost of constructing placeholder values.
#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

AArch64InstructionSelector::AArch64InstructionSelector(
    const AArch64TargetMachine &TM, const AArch64Subtarget &STI,
    const AArch64RegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
static const TargetRegisterClass *
getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB,
                         const RegisterBankInfo &RBI,
                         bool GetAllRegSet = false) {
  if (RB.getID() == AArch64::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 32)
      return GetAllRegSet ? &AArch64::GPR32allRegClass
                          : &AArch64::GPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return GetAllRegSet ? &AArch64::GPR64allRegClass
                          : &AArch64::GPR64RegClass;
    return nullptr;
  }

  if (RB.getID() == AArch64::FPRRegBankID) {
    if (Ty.getSizeInBits() <= 16)
      return &AArch64::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &AArch64::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &AArch64::FPR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return &AArch64::FPR128RegClass;
    return nullptr;
  }

  return nullptr;
}

/// Check whether \p I is a currently unsupported binary operation:
/// - it has an unsized type
/// - an operand is not a vreg
/// - not all of its operands are on the same bank
/// These are checks that should someday live in the verifier, but right now,
/// these are mostly limitations of the aarch64 selector.
static bool unsupportedBinOp(const MachineInstr &I,
                             const AArch64RegisterBankInfo &RBI,
                             const MachineRegisterInfo &MRI,
                             const AArch64RegisterInfo &TRI) {
  LLT Ty = MRI.getType(I.getOperand(0).getReg());
  if (!Ty.isValid()) {
    LLVM_DEBUG(dbgs() << "Generic binop register should be typed\n");
    return true;
  }

  const RegisterBank *PrevOpBank = nullptr;
  for (auto &MO : I.operands()) {
    // FIXME: Support non-register operands.
    if (!MO.isReg()) {
      LLVM_DEBUG(dbgs() << "Generic inst non-reg operands are unsupported\n");
      return true;
    }

    // FIXME: Can generic operations have physical register operands? If
    // so, this will need to be taught about that, and we'll need to get the
    // bank out of the minimal class for the register.
    // Either way, this needs to be documented (and possibly verified).
    if (!TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
      LLVM_DEBUG(dbgs() << "Generic inst has physical register operand\n");
      return true;
    }

    const RegisterBank *OpBank = RBI.getRegBank(MO.getReg(), MRI, TRI);
    if (!OpBank) {
      LLVM_DEBUG(dbgs() << "Generic register has no bank or class\n");
      return true;
    }

    if (PrevOpBank && OpBank != PrevOpBank) {
      LLVM_DEBUG(dbgs() << "Generic inst operands have different banks\n");
      return true;
    }
    PrevOpBank = OpBank;
  }
  return false;
}

/// Select the AArch64 opcode for the basic binary operation \p GenericOpc
/// (such as G_OR or G_SDIV), appropriate for the register bank \p RegBankID
/// and of size \p OpSize.
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID,
                               unsigned OpSize) {
  switch (RegBankID) {
  case AArch64::GPRRegBankID:
    if (OpSize == 32) {
      switch (GenericOpc) {
      case TargetOpcode::G_SHL:
        return AArch64::LSLVWr;
      case TargetOpcode::G_LSHR:
        return AArch64::LSRVWr;
      case TargetOpcode::G_ASHR:
        return AArch64::ASRVWr;
      default:
        return GenericOpc;
      }
    } else if (OpSize == 64) {
      switch (GenericOpc) {
      case TargetOpcode::G_GEP:
        return AArch64::ADDXrr;
      case TargetOpcode::G_SHL:
        return AArch64::LSLVXr;
      case TargetOpcode::G_LSHR:
        return AArch64::LSRVXr;
      case TargetOpcode::G_ASHR:
        return AArch64::ASRVXr;
      default:
        return GenericOpc;
      }
    }
    break;
  case AArch64::FPRRegBankID:
    switch (OpSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_FADD:
        return AArch64::FADDSrr;
      case TargetOpcode::G_FSUB:
        return AArch64::FSUBSrr;
      case TargetOpcode::G_FMUL:
        return AArch64::FMULSrr;
      case TargetOpcode::G_FDIV:
        return AArch64::FDIVSrr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_FADD:
        return AArch64::FADDDrr;
      case TargetOpcode::G_FSUB:
        return AArch64::FSUBDrr;
      case TargetOpcode::G_FMUL:
        return AArch64::FMULDrr;
      case TargetOpcode::G_FDIV:
        return AArch64::FDIVDrr;
      case TargetOpcode::G_OR:
        return AArch64::ORRv8i8;
      default:
        return GenericOpc;
      }
    }
    break;
  }
  return GenericOpc;
}
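// Illustrative note (not in the original source): a 32-bit G_SHL whose
// operands live on the GPR bank maps to AArch64::LSLVWr, the W-register
// variable-shift instruction; any combination this table does not know
// simply echoes GenericOpc back so the caller can bail out.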

/// Select the AArch64 opcode for the G_LOAD or G_STORE operation \p GenericOpc,
/// appropriate for the (value) register bank \p RegBankID and of memory access
/// size \p OpSize. This returns the variant with the base+unsigned-immediate
/// addressing mode (e.g., LDRXui).
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectLoadStoreUIOp(unsigned GenericOpc, unsigned RegBankID,
                                    unsigned OpSize) {
  const bool isStore = GenericOpc == TargetOpcode::G_STORE;
  switch (RegBankID) {
  case AArch64::GPRRegBankID:
    switch (OpSize) {
    case 8:
      return isStore ? AArch64::STRBBui : AArch64::LDRBBui;
    case 16:
      return isStore ? AArch64::STRHHui : AArch64::LDRHHui;
    case 32:
      return isStore ? AArch64::STRWui : AArch64::LDRWui;
    case 64:
      return isStore ? AArch64::STRXui : AArch64::LDRXui;
    }
    break;
  case AArch64::FPRRegBankID:
    switch (OpSize) {
    case 8:
      return isStore ? AArch64::STRBui : AArch64::LDRBui;
    case 16:
      return isStore ? AArch64::STRHui : AArch64::LDRHui;
    case 32:
      return isStore ? AArch64::STRSui : AArch64::LDRSui;
    case 64:
      return isStore ? AArch64::STRDui : AArch64::LDRDui;
    }
    break;
  }
  return GenericOpc;
}
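// Illustrative note (not in the original source): a 32-bit G_LOAD whose value
// sits on the GPR bank selects AArch64::LDRWui, i.e. "ldr w0, [x1, #imm]"
// where #imm is an unsigned immediate scaled by the 4-byte access size.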

static bool selectFP16CopyFromGPR32(MachineInstr &I, const TargetInstrInfo &TII,
                                    MachineRegisterInfo &MRI, unsigned SrcReg) {
  // Copies from gpr32 to fpr16 need to use a sub-register copy.
  unsigned CopyReg = MRI.createVirtualRegister(&AArch64::FPR32RegClass);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::COPY))
      .addDef(CopyReg)
      .addUse(SrcReg);
  unsigned SubRegCopy = MRI.createVirtualRegister(&AArch64::FPR16RegClass);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
      .addDef(SubRegCopy)
      .addUse(CopyReg, 0, AArch64::hsub);

  MachineOperand &RegOp = I.getOperand(1);
  RegOp.setReg(SubRegCopy);
  return true;
}

static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
                       MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                       const RegisterBankInfo &RBI) {

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned SrcReg = I.getOperand(1).getReg();

  if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
    if (TRI.getRegClass(AArch64::FPR16RegClassID)->contains(DstReg) &&
        !TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
      const RegisterBank &RegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
      const TargetRegisterClass *SrcRC = getRegClassForTypeOnBank(
          MRI.getType(SrcReg), RegBank, RBI, /* GetAllRegSet */ true);
      if (SrcRC == &AArch64::GPR32allRegClass)
        return selectFP16CopyFromGPR32(I, TII, MRI, SrcReg);
    }
    assert(I.isCopy() && "Generic operators do not allow physical registers");
    return true;
  }

  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
  (void)DstSize;
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
  (void)SrcSize;
  assert((!TargetRegisterInfo::isPhysicalRegister(SrcReg) || I.isCopy()) &&
         "No phys reg on generic operators");
  assert(
      (DstSize == SrcSize ||
       // Copies are a means to set up initial types, the number of
       // bits may not exactly match.
       (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
        DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI)) ||
       // Copies are a means to copy bits around, as long as we are
       // on the same register class, that's fine. Otherwise, that
       // means we need some SUBREG_TO_REG or AND & co.
       (((DstSize + 31) / 32 == (SrcSize + 31) / 32) && DstSize > SrcSize)) &&
      "Copy with different width?!");
  assert((DstSize <= 64 || RegBank.getID() == AArch64::FPRRegBankID) &&
         "GPRs cannot get more than 64-bit width values");

  const TargetRegisterClass *RC = getRegClassForTypeOnBank(
      MRI.getType(DstReg), RegBank, RBI, /* GetAllRegSet */ true);
  if (!RC) {
    LLVM_DEBUG(dbgs() << "Unexpected bitcast size " << DstSize << '\n');
    return false;
  }

  if (!TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
    const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(SrcReg);
    const TargetRegisterClass *SrcRC =
        RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
    const RegisterBank *RB = nullptr;
    if (!SrcRC) {
      RB = RegClassOrBank.get<const RegisterBank *>();
      SrcRC = getRegClassForTypeOnBank(MRI.getType(SrcReg), *RB, RBI, true);
    }
    // Copies from fpr16 to gpr32 need to use SUBREG_TO_REG.
    if (RC == &AArch64::GPR32allRegClass && SrcRC == &AArch64::FPR16RegClass) {
      unsigned PromoteReg = MRI.createVirtualRegister(&AArch64::FPR32RegClass);
      BuildMI(*I.getParent(), I, I.getDebugLoc(),
              TII.get(AArch64::SUBREG_TO_REG))
          .addDef(PromoteReg)
          .addImm(0)
          .addUse(SrcReg)
          .addImm(AArch64::hsub);
      MachineOperand &RegOp = I.getOperand(1);
      RegOp.setReg(PromoteReg);
    } else if (RC == &AArch64::FPR16RegClass &&
               SrcRC == &AArch64::GPR32allRegClass) {
      selectFP16CopyFromGPR32(I, TII, MRI, SrcReg);
    }
  }

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  I.setDesc(TII.get(AArch64::COPY));
  return true;
}

static unsigned selectFPConvOpc(unsigned GenericOpc, LLT DstTy, LLT SrcTy) {
  if (!DstTy.isScalar() || !SrcTy.isScalar())
    return GenericOpc;

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();

  switch (DstSize) {
  case 32:
    switch (SrcSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUWSri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUWSri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUWSr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUWSr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUXSri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUXSri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUWDr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUWDr;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  case 64:
    switch (SrcSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUWDri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUWDri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUXSr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUXSr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUXDri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUXDri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUXDr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUXDr;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  default:
    return GenericOpc;
  }
  return GenericOpc;
}

static AArch64CC::CondCode changeICMPPredToAArch64CC(CmpInst::Predicate P) {
  switch (P) {
  default:
    llvm_unreachable("Unknown condition code!");
  case CmpInst::ICMP_NE:
    return AArch64CC::NE;
  case CmpInst::ICMP_EQ:
    return AArch64CC::EQ;
  case CmpInst::ICMP_SGT:
    return AArch64CC::GT;
  case CmpInst::ICMP_SGE:
    return AArch64CC::GE;
  case CmpInst::ICMP_SLT:
    return AArch64CC::LT;
  case CmpInst::ICMP_SLE:
    return AArch64CC::LE;
  case CmpInst::ICMP_UGT:
    return AArch64CC::HI;
  case CmpInst::ICMP_UGE:
    return AArch64CC::HS;
  case CmpInst::ICMP_ULT:
    return AArch64CC::LO;
  case CmpInst::ICMP_ULE:
    return AArch64CC::LS;
  }
}
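// Illustrative note (not in the original source): the unsigned predicates map
// to the carry-based codes (e.g. ICMP_ULT -> LO, "unsigned lower", C clear),
// while their signed twins use the N/V-based codes (ICMP_SLT -> LT, N != V).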

static void changeFCMPPredToAArch64CC(CmpInst::Predicate P,
                                      AArch64CC::CondCode &CondCode,
                                      AArch64CC::CondCode &CondCode2) {
  CondCode2 = AArch64CC::AL;
  switch (P) {
  default:
    llvm_unreachable("Unknown FP condition!");
  case CmpInst::FCMP_OEQ:
    CondCode = AArch64CC::EQ;
    break;
  case CmpInst::FCMP_OGT:
    CondCode = AArch64CC::GT;
    break;
  case CmpInst::FCMP_OGE:
    CondCode = AArch64CC::GE;
    break;
  case CmpInst::FCMP_OLT:
    CondCode = AArch64CC::MI;
    break;
  case CmpInst::FCMP_OLE:
    CondCode = AArch64CC::LS;
    break;
  case CmpInst::FCMP_ONE:
    CondCode = AArch64CC::MI;
    CondCode2 = AArch64CC::GT;
    break;
  case CmpInst::FCMP_ORD:
    CondCode = AArch64CC::VC;
    break;
  case CmpInst::FCMP_UNO:
    CondCode = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UEQ:
    CondCode = AArch64CC::EQ;
    CondCode2 = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UGT:
    CondCode = AArch64CC::HI;
    break;
  case CmpInst::FCMP_UGE:
    CondCode = AArch64CC::PL;
    break;
  case CmpInst::FCMP_ULT:
    CondCode = AArch64CC::LT;
    break;
  case CmpInst::FCMP_ULE:
    CondCode = AArch64CC::LE;
    break;
  case CmpInst::FCMP_UNE:
    CondCode = AArch64CC::NE;
    break;
  }
}
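// Illustrative note (not in the original source): FCMP_ONE ("ordered and not
// equal") has no single AArch64 condition code, so it decomposes into MI
// (less than) combined with GT (greater than); whenever CondCode2 != AL the
// caller must test both codes and OR the results, as the G_FCMP selection
// below does with two CSINCs and an ORR.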

bool AArch64InstructionSelector::selectCompareBranch(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {

  const unsigned CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
  MachineInstr *CCMI = MRI.getVRegDef(CondReg);
  if (CCMI->getOpcode() == TargetOpcode::G_TRUNC)
    CCMI = MRI.getVRegDef(CCMI->getOperand(1).getReg());
  if (CCMI->getOpcode() != TargetOpcode::G_ICMP)
    return false;

  unsigned LHS = CCMI->getOperand(2).getReg();
  unsigned RHS = CCMI->getOperand(3).getReg();
  if (!getConstantVRegVal(RHS, MRI))
    std::swap(RHS, LHS);

  const auto RHSImm = getConstantVRegVal(RHS, MRI);
  if (!RHSImm || *RHSImm != 0)
    return false;

  const RegisterBank &RB = *RBI.getRegBank(LHS, MRI, TRI);
  if (RB.getID() != AArch64::GPRRegBankID)
    return false;

  const auto Pred = (CmpInst::Predicate)CCMI->getOperand(1).getPredicate();
  if (Pred != CmpInst::ICMP_NE && Pred != CmpInst::ICMP_EQ)
    return false;

  const unsigned CmpWidth = MRI.getType(LHS).getSizeInBits();
  unsigned CBOpc = 0;
  if (CmpWidth <= 32)
    CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZW : AArch64::CBNZW);
  else if (CmpWidth == 64)
    CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZX : AArch64::CBNZX);
  else
    return false;

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CBOpc))
      .addUse(LHS)
      .addMBB(DestMBB)
      .constrainAllUses(TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
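// Illustrative note (not in the original source): a G_ICMP against zero
// feeding a G_BRCOND folds into a single "cbz x0, %bb" (or cbnz for ne),
// avoiding a separate SUBS + B.cond pair.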

bool AArch64InstructionSelector::selectVaStartAAPCS(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
  return false;
}

bool AArch64InstructionSelector::selectVaStartDarwin(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  unsigned ListReg = I.getOperand(0).getReg();

  unsigned ArgsAddrReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);

  auto MIB =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::ADDXri))
          .addDef(ArgsAddrReg)
          .addFrameIndex(FuncInfo->getVarArgsStackIndex())
          .addImm(0)
          .addImm(0);

  constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);

  MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::STRXui))
            .addUse(ArgsAddrReg)
            .addUse(ListReg)
            .addImm(0)
            .addMemOperand(*I.memoperands_begin());

  constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

void AArch64InstructionSelector::materializeLargeCMVal(
    MachineInstr &I, const Value *V, unsigned char OpFlags) const {
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineIRBuilder MIB(I);

  auto MovZ = MIB.buildInstr(AArch64::MOVZXi, {&AArch64::GPR64RegClass}, {});
  MovZ->addOperand(MF, I.getOperand(1));
  MovZ->getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_G0 |
                                     AArch64II::MO_NC);
  MovZ->addOperand(MF, MachineOperand::CreateImm(0));
  constrainSelectedInstRegOperands(*MovZ, TII, TRI, RBI);

  auto BuildMovK = [&](unsigned SrcReg, unsigned char Flags, unsigned Offset,
                       unsigned ForceDstReg) {
    unsigned DstReg = ForceDstReg
                          ? ForceDstReg
                          : MRI.createVirtualRegister(&AArch64::GPR64RegClass);
    auto MovI = MIB.buildInstr(AArch64::MOVKXi).addDef(DstReg).addUse(SrcReg);
    if (auto *GV = dyn_cast<GlobalValue>(V)) {
      MovI->addOperand(MF, MachineOperand::CreateGA(
                               GV, MovZ->getOperand(1).getOffset(), Flags));
    } else {
      MovI->addOperand(
          MF, MachineOperand::CreateBA(cast<BlockAddress>(V),
                                       MovZ->getOperand(1).getOffset(), Flags));
    }
    MovI->addOperand(MF, MachineOperand::CreateImm(Offset));
    constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI);
    return DstReg;
  };
  unsigned DstReg = BuildMovK(MovZ->getOperand(0).getReg(),
                              AArch64II::MO_G1 | AArch64II::MO_NC, 16, 0);
  DstReg = BuildMovK(DstReg, AArch64II::MO_G2 | AArch64II::MO_NC, 32, 0);
  BuildMovK(DstReg, AArch64II::MO_G3, 48, I.getOperand(0).getReg());
  return;
}
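// Illustrative expansion (not in the original source): under the large code
// model the sequence built above materializes a 64-bit address 16 bits at a
// time, roughly:
//   movz x0, #:abs_g0_nc:sym
//   movk x0, #:abs_g1_nc:sym
//   movk x0, #:abs_g2_nc:sym
//   movk x0, #:abs_g3:sym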

bool AArch64InstructionSelector::select(MachineInstr &I,
                                        CodeGenCoverage &CoverageInfo) const {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  // G_PHI requires same handling as PHI
  if (!isPreISelGenericOpcode(Opcode) || Opcode == TargetOpcode::G_PHI) {
    // Certain non-generic instructions also need some special handling.

    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);

    if (Opcode == TargetOpcode::PHI || Opcode == TargetOpcode::G_PHI) {
      const unsigned DefReg = I.getOperand(0).getReg();
      const LLT DefTy = MRI.getType(DefReg);

      const TargetRegisterClass *DefRC = nullptr;
      if (TargetRegisterInfo::isPhysicalRegister(DefReg)) {
        DefRC = TRI.getRegClass(DefReg);
      } else {
        const RegClassOrRegBank &RegClassOrBank =
            MRI.getRegClassOrRegBank(DefReg);

        DefRC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
        if (!DefRC) {
          if (!DefTy.isValid()) {
            LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
            return false;
          }
          const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
          DefRC = getRegClassForTypeOnBank(DefTy, RB, RBI);
          if (!DefRC) {
            LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
            return false;
          }
        }
      }
      I.setDesc(TII.get(TargetOpcode::PHI));

      return RBI.constrainGenericRegister(DefReg, *DefRC, MRI);
    }

    if (I.isCopy())
      return selectCopy(I, TII, MRI, TRI, RBI);

    return true;
  }


  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(
        dbgs() << "Generic instruction has unexpected implicit operands\n");
    return false;
  }

  if (selectImpl(I, CoverageInfo))
    return true;

  LLT Ty =
      I.getOperand(0).isReg() ? MRI.getType(I.getOperand(0).getReg()) : LLT{};

  switch (Opcode) {
  case TargetOpcode::G_BRCOND: {
    if (Ty.getSizeInBits() > 32) {
      // We shouldn't need this on AArch64, but it would be implemented as an
      // EXTRACT_SUBREG followed by a TBNZW because TBNZX has no encoding if the
      // bit being tested is < 32.
      LLVM_DEBUG(dbgs() << "G_BRCOND has type: " << Ty
                        << ", expected at most 32-bits");
      return false;
    }

    const unsigned CondReg = I.getOperand(0).getReg();
    MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

    // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z
    // instructions will not be produced, as they are conditional branch
    // instructions that do not set flags.
    bool ProduceNonFlagSettingCondBr =
        !MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening);
    if (ProduceNonFlagSettingCondBr && selectCompareBranch(I, MF, MRI))
      return true;

    if (ProduceNonFlagSettingCondBr) {
      auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::TBNZW))
                     .addUse(CondReg)
                     .addImm(/*bit offset=*/0)
                     .addMBB(DestMBB);
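      // Illustrative note (not in the original source): "tbnz w0, #0, bb"
      // branches iff bit 0 of the condition register is set and neither reads
      // nor writes NZCV, which is exactly why speculative load hardening has
      // to opt out of this form above.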

      I.eraseFromParent();
      return constrainSelectedInstRegOperands(*MIB.getInstr(), TII, TRI, RBI);
    } else {
      auto CMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri))
                     .addDef(AArch64::WZR)
                     .addUse(CondReg)
                     .addImm(1);
      constrainSelectedInstRegOperands(*CMP.getInstr(), TII, TRI, RBI);
      auto Bcc =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::Bcc))
              .addImm(AArch64CC::EQ)
              .addMBB(DestMBB);

      I.eraseFromParent();
      return constrainSelectedInstRegOperands(*Bcc.getInstr(), TII, TRI, RBI);
    }
  }

  case TargetOpcode::G_BRINDIRECT: {
    I.setDesc(TII.get(AArch64::BR));
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_CONSTANT: {
    const bool isFP = Opcode == TargetOpcode::G_FCONSTANT;

    const LLT s32 = LLT::scalar(32);
    const LLT s64 = LLT::scalar(64);
    const LLT p0 = LLT::pointer(0, 64);

    const unsigned DefReg = I.getOperand(0).getReg();
    const LLT DefTy = MRI.getType(DefReg);
    const unsigned DefSize = DefTy.getSizeInBits();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    // FIXME: Redundant check, but even less readable when factored out.
    if (isFP) {
      if (Ty != s32 && Ty != s64) {
        LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty
                          << " constant, expected: " << s32 << " or " << s64
                          << '\n');
        return false;
      }

      if (RB.getID() != AArch64::FPRRegBankID) {
        LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty
                          << " constant on bank: " << RB
                          << ", expected: FPR\n");
        return false;
      }

      // The case when we have 0.0 is covered by tablegen. Reject it here so we
      // can be sure tablegen works correctly and isn't rescued by this code.
      if (I.getOperand(1).getFPImm()->getValueAPF().isExactlyValue(0.0))
        return false;
    } else {
      // s32 and s64 are covered by tablegen.
      if (Ty != p0) {
        LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty
                          << " constant, expected: " << s32 << ", " << s64
                          << ", or " << p0 << '\n');
        return false;
      }

      if (RB.getID() != AArch64::GPRRegBankID) {
        LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty
                          << " constant on bank: " << RB
                          << ", expected: GPR\n");
        return false;
      }
    }

    const unsigned MovOpc =
        DefSize == 32 ? AArch64::MOVi32imm : AArch64::MOVi64imm;

    I.setDesc(TII.get(MovOpc));

    if (isFP) {
      const TargetRegisterClass &GPRRC =
          DefSize == 32 ? AArch64::GPR32RegClass : AArch64::GPR64RegClass;
      const TargetRegisterClass &FPRRC =
          DefSize == 32 ? AArch64::FPR32RegClass : AArch64::FPR64RegClass;

      const unsigned DefGPRReg = MRI.createVirtualRegister(&GPRRC);
      MachineOperand &RegOp = I.getOperand(0);
      RegOp.setReg(DefGPRReg);

      BuildMI(MBB, std::next(I.getIterator()), I.getDebugLoc(),
              TII.get(AArch64::COPY))
          .addDef(DefReg)
          .addUse(DefGPRReg);

      if (!RBI.constrainGenericRegister(DefReg, FPRRC, MRI)) {
        LLVM_DEBUG(dbgs() << "Failed to constrain G_FCONSTANT def operand\n");
        return false;
      }

      MachineOperand &ImmOp = I.getOperand(1);
      // FIXME: Is going through int64_t always correct?
      ImmOp.ChangeToImmediate(
          ImmOp.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
    } else if (I.getOperand(1).isCImm()) {
      uint64_t Val = I.getOperand(1).getCImm()->getZExtValue();
      I.getOperand(1).ChangeToImmediate(Val);
    } else if (I.getOperand(1).isImm()) {
      uint64_t Val = I.getOperand(1).getImm();
      I.getOperand(1).ChangeToImmediate(Val);
    }

    constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    return true;
  }
  case TargetOpcode::G_EXTRACT: {
    LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
    LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    (void)DstTy;
    unsigned SrcSize = SrcTy.getSizeInBits();
    // Larger extracts are vectors, same-size extracts should be something else
    // by now (either split up or simplified to a COPY).
    if (SrcTy.getSizeInBits() > 64 || Ty.getSizeInBits() > 32)
      return false;

    I.setDesc(TII.get(SrcSize == 64 ? AArch64::UBFMXri : AArch64::UBFMWri));
    MachineInstrBuilder(MF, I).addImm(I.getOperand(2).getImm() +
                                      Ty.getSizeInBits() - 1);
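    // Worked example (not in the original source): extracting an s16 at bit
    // offset 32 from an s64 yields "ubfm x0, x1, #32, #47", an unsigned
    // bitfield extract of bits [47:32] - a logical shift right by 32 that
    // keeps the low 16 bits.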

    if (SrcSize < 64) {
      assert(SrcSize == 32 && DstTy.getSizeInBits() == 16 &&
             "unexpected G_EXTRACT types");
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    }

    unsigned DstReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
    BuildMI(MBB, std::next(I.getIterator()), I.getDebugLoc(),
            TII.get(AArch64::COPY))
        .addDef(I.getOperand(0).getReg())
        .addUse(DstReg, 0, AArch64::sub_32);
    RBI.constrainGenericRegister(I.getOperand(0).getReg(),
                                 AArch64::GPR32RegClass, MRI);
    I.getOperand(0).setReg(DstReg);

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_INSERT: {
    LLT SrcTy = MRI.getType(I.getOperand(2).getReg());
    LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    unsigned DstSize = DstTy.getSizeInBits();
    // Larger inserts are vectors, same-size ones should be something else by
    // now (split up or turned into COPYs).
    if (Ty.getSizeInBits() > 64 || SrcTy.getSizeInBits() > 32)
      return false;

    I.setDesc(TII.get(DstSize == 64 ? AArch64::BFMXri : AArch64::BFMWri));
    unsigned LSB = I.getOperand(3).getImm();
    unsigned Width = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
    I.getOperand(3).setImm((DstSize - LSB) % DstSize);
    MachineInstrBuilder(MF, I).addImm(Width - 1);
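    // Worked example (not in the original source): inserting an s16 at bit 32
    // of an s64 becomes "bfm x0, x1, #32, #15": immr = (64 - 32) % 64 rotates
    // the source into position and imms = 16 - 1 gives the field width, i.e.
    // a BFI of 16 bits at offset 32.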

    if (DstSize < 64) {
      assert(DstSize == 32 && SrcTy.getSizeInBits() == 16 &&
             "unexpected G_INSERT types");
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    }

    unsigned SrcReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
    BuildMI(MBB, I.getIterator(), I.getDebugLoc(),
            TII.get(AArch64::SUBREG_TO_REG))
        .addDef(SrcReg)
        .addImm(0)
        .addUse(I.getOperand(2).getReg())
        .addImm(AArch64::sub_32);
    RBI.constrainGenericRegister(I.getOperand(2).getReg(),
                                 AArch64::GPR32RegClass, MRI);
    I.getOperand(2).setReg(SrcReg);

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
  case TargetOpcode::G_FRAME_INDEX: {
    // allocas and G_FRAME_INDEX are only supported in addrspace(0).
    if (Ty != LLT::pointer(0, 64)) {
      LLVM_DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << Ty
                        << ", expected: " << LLT::pointer(0, 64) << '\n');
      return false;
    }
    I.setDesc(TII.get(AArch64::ADDXri));

    // MOs for a #0 shifted immediate.
    I.addOperand(MachineOperand::CreateImm(0));
    I.addOperand(MachineOperand::CreateImm(0));

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_GLOBAL_VALUE: {
    auto GV = I.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // FIXME: we don't support TLS yet.
      return false;
    }
    unsigned char OpFlags = STI.ClassifyGlobalReference(GV, TM);
    if (OpFlags & AArch64II::MO_GOT) {
      I.setDesc(TII.get(AArch64::LOADgot));
      I.getOperand(1).setTargetFlags(OpFlags);
    } else if (TM.getCodeModel() == CodeModel::Large) {
      // Materialize the global using movz/movk instructions.
      materializeLargeCMVal(I, GV, OpFlags);
      I.eraseFromParent();
      return true;
    } else if (TM.getCodeModel() == CodeModel::Tiny) {
      I.setDesc(TII.get(AArch64::ADR));
      I.getOperand(1).setTargetFlags(OpFlags);
    } else {
      I.setDesc(TII.get(AArch64::MOVaddr));
      I.getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_PAGE);
      MachineInstrBuilder MIB(MF, I);
      MIB.addGlobalAddress(GV, I.getOperand(1).getOffset(),
                           OpFlags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE: {
    LLT PtrTy = MRI.getType(I.getOperand(1).getReg());

    if (PtrTy != LLT::pointer(0, 64)) {
      LLVM_DEBUG(dbgs() << "Load/Store pointer has type: " << PtrTy
                        << ", expected: " << LLT::pointer(0, 64) << '\n');
      return false;
    }

    auto &MemOp = **I.memoperands_begin();
    if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
      LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
      return false;
    }
    unsigned MemSizeInBits = MemOp.getSize() * 8;

    const unsigned PtrReg = I.getOperand(1).getReg();
#ifndef NDEBUG
    const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI);
    // Sanity-check the pointer register.
    assert(PtrRB.getID() == AArch64::GPRRegBankID &&
           "Load/Store pointer operand isn't a GPR");
    assert(MRI.getType(PtrReg).isPointer() &&
           "Load/Store pointer operand isn't a pointer");
#endif

    const unsigned ValReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(ValReg, MRI, TRI);

    const unsigned NewOpc =
        selectLoadStoreUIOp(I.getOpcode(), RB.getID(), MemSizeInBits);
    if (NewOpc == I.getOpcode())
      return false;

    I.setDesc(TII.get(NewOpc));

    uint64_t Offset = 0;
    auto *PtrMI = MRI.getVRegDef(PtrReg);

    // Try to fold a GEP into our unsigned immediate addressing mode.
    if (PtrMI->getOpcode() == TargetOpcode::G_GEP) {
      if (auto COff = getConstantVRegVal(PtrMI->getOperand(2).getReg(), MRI)) {
        int64_t Imm = *COff;
        const unsigned Size = MemSizeInBits / 8;
        const unsigned Scale = Log2_32(Size);
        if ((Imm & (Size - 1)) == 0 && Imm >= 0 && Imm < (0x1000 << Scale)) {
          unsigned Ptr2Reg = PtrMI->getOperand(1).getReg();
          I.getOperand(1).setReg(Ptr2Reg);
          PtrMI = MRI.getVRegDef(Ptr2Reg);
          Offset = Imm / Size;
        }
      }
    }
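    // Illustrative example (not in the original source): for an 8-byte LDRXui,
    // a G_GEP adding a constant 16 passes the checks above (16 is 8-aligned
    // and below 0x1000 << 3), so the fold rewrites the base and records
    // Offset = 16 / 8 = 2 in the scaled addressing mode.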

    // If we haven't folded anything into our addressing mode yet, try to fold
    // a frame index into the base+offset.
    if (!Offset && PtrMI->getOpcode() == TargetOpcode::G_FRAME_INDEX)
      I.getOperand(1).ChangeToFrameIndex(PtrMI->getOperand(1).getIndex());

    I.addOperand(MachineOperand::CreateImm(Offset));

    // If we're storing a 0, use WZR/XZR.
    if (auto CVal = getConstantVRegVal(ValReg, MRI)) {
      if (*CVal == 0 && Opcode == TargetOpcode::G_STORE) {
        if (I.getOpcode() == AArch64::STRWui)
          I.getOperand(0).setReg(AArch64::WZR);
        else if (I.getOpcode() == AArch64::STRXui)
          I.getOperand(0).setReg(AArch64::XZR);
      }
    }

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_SMULH:
  case TargetOpcode::G_UMULH: {
    // Reject the various things we don't support yet.
    if (unsupportedBinOp(I, RBI, MRI, TRI))
      return false;

    const unsigned DefReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    if (RB.getID() != AArch64::GPRRegBankID) {
      LLVM_DEBUG(dbgs() << "G_[SU]MULH on bank: " << RB << ", expected: GPR\n");
      return false;
    }

    if (Ty != LLT::scalar(64)) {
      LLVM_DEBUG(dbgs() << "G_[SU]MULH has type: " << Ty
                        << ", expected: " << LLT::scalar(64) << '\n');
      return false;
    }

    unsigned NewOpc = I.getOpcode() == TargetOpcode::G_SMULH ? AArch64::SMULHrr
                                                             : AArch64::UMULHrr;
    I.setDesc(TII.get(NewOpc));

    // Now that we selected an opcode, we need to constrain the register
    // operands to use appropriate classes.
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:

  case TargetOpcode::G_OR:
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_GEP: {
    // Reject the various things we don't support yet.
    if (unsupportedBinOp(I, RBI, MRI, TRI))
      return false;

    const unsigned OpSize = Ty.getSizeInBits();

    const unsigned DefReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    const unsigned NewOpc = selectBinaryOp(I.getOpcode(), RB.getID(), OpSize);
    if (NewOpc == I.getOpcode())
      return false;

    I.setDesc(TII.get(NewOpc));
    // FIXME: Should the type be always reset in setDesc?

    // Now that we selected an opcode, we need to constrain the register
    // operands to use appropriate classes.
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_PTR_MASK: {
    uint64_t Align = I.getOperand(2).getImm();
    if (Align >= 64 || Align == 0)
      return false;

    uint64_t Mask = ~((1ULL << Align) - 1);
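    // Illustrative note (not in the original source): Align is the log2 of
    // the alignment here, so Align == 4 produces Mask == ~0xfULL and the
    // ANDXri emitted below clears the low four address bits.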
    I.setDesc(TII.get(AArch64::ANDXri));
    I.getOperand(2).ChangeToImmediate(
        AArch64_AM::encodeLogicalImmediate(Mask, 64));

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_TRUNC: {
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());

    const unsigned DstReg = I.getOperand(0).getReg();
    const unsigned SrcReg = I.getOperand(1).getReg();

    const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
    const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

    if (DstRB.getID() != SrcRB.getID()) {
      LLVM_DEBUG(
          dbgs() << "G_TRUNC/G_PTRTOINT input/output on different banks\n");
      return false;
    }

    if (DstRB.getID() == AArch64::GPRRegBankID) {
      const TargetRegisterClass *DstRC =
          getRegClassForTypeOnBank(DstTy, DstRB, RBI);
      if (!DstRC)
        return false;

      const TargetRegisterClass *SrcRC =
          getRegClassForTypeOnBank(SrcTy, SrcRB, RBI);
      if (!SrcRC)
        return false;

      if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
          !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
        LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC/G_PTRTOINT\n");
        return false;
      }

      if (DstRC == SrcRC) {
        // Nothing to be done
      } else if (Opcode == TargetOpcode::G_TRUNC && DstTy == LLT::scalar(32) &&
                 SrcTy == LLT::scalar(64)) {
        llvm_unreachable("TableGen can import this case");
        return false;
      } else if (DstRC == &AArch64::GPR32RegClass &&
                 SrcRC == &AArch64::GPR64RegClass) {
        I.getOperand(1).setSubReg(AArch64::sub_32);
      } else {
        LLVM_DEBUG(
            dbgs() << "Unhandled mismatched classes in G_TRUNC/G_PTRTOINT\n");
        return false;
      }

      I.setDesc(TII.get(TargetOpcode::COPY));
      return true;
    } else if (DstRB.getID() == AArch64::FPRRegBankID) {
      if (DstTy == LLT::vector(4, 16) && SrcTy == LLT::vector(4, 32)) {
        I.setDesc(TII.get(AArch64::XTNv4i16));
        constrainSelectedInstRegOperands(I, TII, TRI, RBI);
        return true;
      }
    }

    return false;
  }

  case TargetOpcode::G_ANYEXT: {
    const unsigned DstReg = I.getOperand(0).getReg();
    const unsigned SrcReg = I.getOperand(1).getReg();

    const RegisterBank &RBDst = *RBI.getRegBank(DstReg, MRI, TRI);
    if (RBDst.getID() != AArch64::GPRRegBankID) {
      LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBDst
                        << ", expected: GPR\n");
      return false;
    }

    const RegisterBank &RBSrc = *RBI.getRegBank(SrcReg, MRI, TRI);
    if (RBSrc.getID() != AArch64::GPRRegBankID) {
      LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBSrc
                        << ", expected: GPR\n");
      return false;
    }

    const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();

    if (DstSize == 0) {
      LLVM_DEBUG(dbgs() << "G_ANYEXT operand has no size, not a gvreg?\n");
      return false;
    }

    if (DstSize != 64 && DstSize > 32) {
      LLVM_DEBUG(dbgs() << "G_ANYEXT to size: " << DstSize
                        << ", expected: 32 or 64\n");
      return false;
    }
    // At this point G_ANYEXT is just like a plain COPY, but we need
    // to explicitly form the 64-bit value if any.
    if (DstSize > 32) {
      unsigned ExtSrc = MRI.createVirtualRegister(&AArch64::GPR64allRegClass);
      BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
          .addDef(ExtSrc)
          .addImm(0)
          .addUse(SrcReg)
          .addImm(AArch64::sub_32);
      I.getOperand(1).setReg(ExtSrc);
    }
    return selectCopy(I, TII, MRI, TRI, RBI);
  }

  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT: {
    unsigned Opcode = I.getOpcode();
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
              SrcTy = MRI.getType(I.getOperand(1).getReg());
    const bool isSigned = Opcode == TargetOpcode::G_SEXT;
    const unsigned DefReg = I.getOperand(0).getReg();
    const unsigned SrcReg = I.getOperand(1).getReg();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    if (RB.getID() != AArch64::GPRRegBankID) {
      LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode()) << " on bank: " << RB
                        << ", expected: GPR\n");
      return false;
    }

    MachineInstr *ExtI;
    if (DstTy == LLT::scalar(64)) {
      // FIXME: Can we avoid manually doing this?
      if (!RBI.constrainGenericRegister(SrcReg, AArch64::GPR32RegClass, MRI)) {
        LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(Opcode)
                          << " operand\n");
        return false;
      }

      const unsigned SrcXReg =
          MRI.createVirtualRegister(&AArch64::GPR64RegClass);
      BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
          .addDef(SrcXReg)
          .addImm(0)
          .addUse(SrcReg)
          .addImm(AArch64::sub_32);

      const unsigned NewOpc = isSigned ? AArch64::SBFMXri : AArch64::UBFMXri;
      ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
                 .addDef(DefReg)
                 .addUse(SrcXReg)
                 .addImm(0)
                 .addImm(SrcTy.getSizeInBits() - 1);
    } else if (DstTy.isScalar() && DstTy.getSizeInBits() <= 32) {
      const unsigned NewOpc = isSigned ? AArch64::SBFMWri : AArch64::UBFMWri;
      ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
                 .addDef(DefReg)
                 .addUse(SrcReg)
                 .addImm(0)
                 .addImm(SrcTy.getSizeInBits() - 1);
    } else {
      return false;
    }

    constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP:
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI: {
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
              SrcTy = MRI.getType(I.getOperand(1).getReg());
    const unsigned NewOpc = selectFPConvOpc(Opcode, DstTy, SrcTy);
    if (NewOpc == Opcode)
      return false;

    I.setDesc(TII.get(NewOpc));
    constrainSelectedInstRegOperands(I, TII, TRI, RBI);

    return true;
  }


  case TargetOpcode::G_INTTOPTR:
    // The importer is currently unable to import pointer types since they
    // didn't exist in SelectionDAG.
    return selectCopy(I, TII, MRI, TRI, RBI);

  case TargetOpcode::G_BITCAST:
    // Imported SelectionDAG rules can handle every bitcast except those that
    // bitcast from a type to the same type. Ideally, these shouldn't occur
    // but we might not run an optimizer that deletes them.
    if (MRI.getType(I.getOperand(0).getReg()) ==
        MRI.getType(I.getOperand(1).getReg()))
      return selectCopy(I, TII, MRI, TRI, RBI);
    return false;

  case TargetOpcode::G_SELECT: {
    if (MRI.getType(I.getOperand(1).getReg()) != LLT::scalar(1)) {
      LLVM_DEBUG(dbgs() << "G_SELECT cond has type: " << Ty
                        << ", expected: " << LLT::scalar(1) << '\n');
      return false;
    }

    const unsigned CondReg = I.getOperand(1).getReg();
    const unsigned TReg = I.getOperand(2).getReg();
    const unsigned FReg = I.getOperand(3).getReg();

    unsigned CSelOpc = 0;

    if (Ty == LLT::scalar(32)) {
      CSelOpc = AArch64::CSELWr;
    } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
      CSelOpc = AArch64::CSELXr;
    } else {
      return false;
    }

    MachineInstr &TstMI =
        *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri))
             .addDef(AArch64::WZR)
             .addUse(CondReg)
             .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));

    MachineInstr &CSelMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CSelOpc))
                                .addDef(I.getOperand(0).getReg())
                                .addUse(TReg)
                                .addUse(FReg)
                                .addImm(AArch64CC::NE);

    constrainSelectedInstRegOperands(TstMI, TII, TRI, RBI);
    constrainSelectedInstRegOperands(CSelMI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_ICMP: {
    if (Ty != LLT::scalar(32)) {
      LLVM_DEBUG(dbgs() << "G_ICMP result has type: " << Ty
                        << ", expected: " << LLT::scalar(32) << '\n');
      return false;
    }

    unsigned CmpOpc = 0;
    unsigned ZReg = 0;

    LLT CmpTy = MRI.getType(I.getOperand(2).getReg());
    if (CmpTy == LLT::scalar(32)) {
      CmpOpc = AArch64::SUBSWrr;
      ZReg = AArch64::WZR;
    } else if (CmpTy == LLT::scalar(64) || CmpTy.isPointer()) {
      CmpOpc = AArch64::SUBSXrr;
      ZReg = AArch64::XZR;
    } else {
      return false;
    }

    // CSINC increments the result by one when the condition code is false.
    // Therefore, we have to invert the predicate to get an increment by 1 when
    // the predicate is true.
    const AArch64CC::CondCode invCC =
        changeICMPPredToAArch64CC(CmpInst::getInversePredicate(
            (CmpInst::Predicate)I.getOperand(1).getPredicate()));
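    // Worked example (not in the original source): for ICMP_EQ the inverted
    // predicate is NE, so the CSINC below is "csinc w0, wzr, wzr, ne", which
    // yields wzr (0) when NE holds and wzr + 1 (1) otherwise - i.e. exactly 1
    // when the original EQ condition is true.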

    MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc))
                               .addDef(ZReg)
                               .addUse(I.getOperand(2).getReg())
                               .addUse(I.getOperand(3).getReg());

    MachineInstr &CSetMI =
        *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
             .addDef(I.getOperand(0).getReg())
             .addUse(AArch64::WZR)
             .addUse(AArch64::WZR)
             .addImm(invCC);

    constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI);
    constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  case TargetOpcode::G_FCMP: {
    if (Ty != LLT::scalar(32)) {
      LLVM_DEBUG(dbgs() << "G_FCMP result has type: " << Ty
                        << ", expected: " << LLT::scalar(32) << '\n');
      return false;
    }

    unsigned CmpOpc = 0;
    LLT CmpTy = MRI.getType(I.getOperand(2).getReg());
    if (CmpTy == LLT::scalar(32)) {
      CmpOpc = AArch64::FCMPSrr;
    } else if (CmpTy == LLT::scalar(64)) {
      CmpOpc = AArch64::FCMPDrr;
    } else {
      return false;
    }

    // FIXME: regbank

    AArch64CC::CondCode CC1, CC2;
    changeFCMPPredToAArch64CC(
        (CmpInst::Predicate)I.getOperand(1).getPredicate(), CC1, CC2);

    MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc))
                               .addUse(I.getOperand(2).getReg())
                               .addUse(I.getOperand(3).getReg());

    const unsigned DefReg = I.getOperand(0).getReg();
    unsigned Def1Reg = DefReg;
    if (CC2 != AArch64CC::AL)
      Def1Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);

    MachineInstr &CSetMI =
        *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
             .addDef(Def1Reg)
             .addUse(AArch64::WZR)
             .addUse(AArch64::WZR)
             .addImm(getInvertedCondCode(CC1));

    if (CC2 != AArch64CC::AL) {
      unsigned Def2Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
      MachineInstr &CSet2MI =
          *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
               .addDef(Def2Reg)
               .addUse(AArch64::WZR)
               .addUse(AArch64::WZR)
               .addImm(getInvertedCondCode(CC2));
      MachineInstr &OrMI =
          *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ORRWrr))
               .addDef(DefReg)
               .addUse(Def1Reg)
               .addUse(Def2Reg);
      constrainSelectedInstRegOperands(OrMI, TII, TRI, RBI);
      constrainSelectedInstRegOperands(CSet2MI, TII, TRI, RBI);
    }

    constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI);
    constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_VASTART:
    return STI.isTargetDarwin() ? selectVaStartDarwin(I, MF, MRI)
                                : selectVaStartAAPCS(I, MF, MRI);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    if (!I.getOperand(0).isIntrinsicID())
      return false;
    if (I.getOperand(0).getIntrinsicID() != Intrinsic::trap)
      return false;
    BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::BRK))
        .addImm(1);
    I.eraseFromParent();
    return true;
  case TargetOpcode::G_IMPLICIT_DEF: {
    I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    const unsigned DstReg = I.getOperand(0).getReg();
    const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
    const TargetRegisterClass *DstRC =
        getRegClassForTypeOnBank(DstTy, DstRB, RBI);
    RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
    return true;
  }
  case TargetOpcode::G_BLOCK_ADDR: {
    if (TM.getCodeModel() == CodeModel::Large) {
      materializeLargeCMVal(I, I.getOperand(1).getBlockAddress(), 0);
      I.eraseFromParent();
      return true;
    } else {
      I.setDesc(TII.get(AArch64::MOVaddrBA));
      auto MovMI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::MOVaddrBA),
                           I.getOperand(0).getReg())
                       .addBlockAddress(I.getOperand(1).getBlockAddress(),
                                        /* Offset */ 0, AArch64II::MO_PAGE)
                       .addBlockAddress(
                           I.getOperand(1).getBlockAddress(), /* Offset */ 0,
                           AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
      I.eraseFromParent();
      return constrainSelectedInstRegOperands(*MovMI, TII, TRI, RBI);
    }
  }
  case TargetOpcode::G_BUILD_VECTOR:
    return selectBuildVector(I, MRI);
  case TargetOpcode::G_MERGE_VALUES:
    return selectMergeValues(I, MRI);
  }

  return false;
}

bool AArch64InstructionSelector::emitScalarToVector(
    unsigned &Dst, const LLT DstTy, const TargetRegisterClass *DstRC,
    unsigned Scalar, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, MachineRegisterInfo &MRI) const {
  Dst = MRI.createVirtualRegister(DstRC);

  unsigned UndefVec = MRI.createVirtualRegister(DstRC);
  MachineInstr &UndefMI = *BuildMI(MBB, MBBI, MBBI->getDebugLoc(),
                                   TII.get(TargetOpcode::IMPLICIT_DEF))
                               .addDef(UndefVec);

  auto BuildFn = [&](unsigned SubregIndex) {
    MachineInstr &InsMI = *BuildMI(MBB, MBBI, MBBI->getDebugLoc(),
                                   TII.get(TargetOpcode::INSERT_SUBREG))
                               .addDef(Dst)
                               .addUse(UndefVec)
                               .addUse(Scalar)
                               .addImm(SubregIndex);
    constrainSelectedInstRegOperands(UndefMI, TII, TRI, RBI);
    return constrainSelectedInstRegOperands(InsMI, TII, TRI, RBI);
  };

  switch (DstTy.getElementType().getSizeInBits()) {
  case 32:
    return BuildFn(AArch64::ssub);
  case 64:
    return BuildFn(AArch64::dsub);
  default:
    return false;
  }
}

bool AArch64InstructionSelector::selectMergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert(I.getOpcode() == TargetOpcode::G_MERGE_VALUES && "unexpected opcode");
  const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
  const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
  assert(!DstTy.isVector() && !SrcTy.isVector() && "invalid merge operation");

  // At the moment we only support merging two s32s into an s64.
  if (I.getNumOperands() != 3)
    return false;
  if (DstTy.getSizeInBits() != 64 || SrcTy.getSizeInBits() != 32)
    return false;
  const RegisterBank &RB = *RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI);
  if (RB.getID() != AArch64::GPRRegBankID)
    return false;

  auto *DstRC = &AArch64::GPR64RegClass;
  unsigned SubToRegDef = MRI.createVirtualRegister(DstRC);
  MachineInstr &SubRegMI = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                    TII.get(TargetOpcode::SUBREG_TO_REG))
                                .addDef(SubToRegDef)
                                .addImm(0)
                                .addUse(I.getOperand(1).getReg())
                                .addImm(AArch64::sub_32);
  unsigned SubToRegDef2 = MRI.createVirtualRegister(DstRC);
  // Need to anyext the second scalar before we can use bfm
  MachineInstr &SubRegMI2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                     TII.get(TargetOpcode::SUBREG_TO_REG))
                                 .addDef(SubToRegDef2)
                                 .addImm(0)
                                 .addUse(I.getOperand(2).getReg())
                                 .addImm(AArch64::sub_32);
  MachineInstr &BFM =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::BFMXri))
           .addDef(I.getOperand(0).getReg())
           .addUse(SubToRegDef)
           .addUse(SubToRegDef2)
           .addImm(32)
           .addImm(31);
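  // Illustrative note (not in the original source): BFMXri with immr = 32 and
  // imms = 31 rotates the second source right by 32 and inserts its low 32
  // bits at bit 32 of the destination, so the two s32 halves end up as the
  // low and high words of the s64 result.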
  constrainSelectedInstRegOperands(SubRegMI, TII, TRI, RBI);
  constrainSelectedInstRegOperands(SubRegMI2, TII, TRI, RBI);
  constrainSelectedInstRegOperands(BFM, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}
1641 
1642 bool AArch64InstructionSelector::selectBuildVector(
1643  MachineInstr &I, MachineRegisterInfo &MRI) const {
1644  assert(I.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
1645  // Until we port more of the optimized selections, for now just use a vector
1646  // insert sequence.
1647  const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
1648  const LLT EltTy = MRI.getType(I.getOperand(1).getReg());
1649  unsigned EltSize = EltTy.getSizeInBits();
1650  if (EltSize < 32 || EltSize > 64)
1651  return false; // Don't support all element types yet.
1652  const RegisterBank &RB = *RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI);
1653  unsigned Opc;
1654  unsigned SubregIdx;
1655  if (RB.getID() == AArch64::GPRRegBankID) {
1656  if (EltSize == 32) {
1657  Opc = AArch64::INSvi32gpr;
1658  SubregIdx = AArch64::ssub;
1659  } else {
1660  Opc = AArch64::INSvi64gpr;
1661  SubregIdx = AArch64::dsub;
1662  }
1663  } else {
1664  if (EltSize == 32) {
1665  Opc = AArch64::INSvi32lane;
1666  SubregIdx = AArch64::ssub;
1667  } else {
1668  Opc = AArch64::INSvi64lane;
1669  SubregIdx = AArch64::dsub;
1670  }
1671  }
1672 
1673  if (EltSize * DstTy.getNumElements() != 128)
1674  return false; // Don't handle unpacked vectors yet.
1675 
1676  unsigned DstVec = 0;
1677  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
1678  DstTy, RBI.getRegBank(AArch64::FPRRegBankID), RBI);
1679  emitScalarToVector(DstVec, DstTy, DstRC, I.getOperand(1).getReg(),
1680  *I.getParent(), I.getIterator(), MRI);
1681  for (unsigned i = 2, e = DstTy.getSizeInBits() / EltSize + 1; i < e; ++i) {
1682  unsigned InsDef;
1683  // For the last insert re-use the dst reg of the G_BUILD_VECTOR.
1684  if (i + 1 < e)
1685  InsDef = MRI.createVirtualRegister(DstRC);
1686  else
1687  InsDef = I.getOperand(0).getReg();
1688  unsigned LaneIdx = i - 1;
1689  if (RB.getID() == AArch64::FPRRegBankID) {
1690  unsigned ImpDef = MRI.createVirtualRegister(DstRC);
1691  MachineInstr &ImpDefMI = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
1692  TII.get(TargetOpcode::IMPLICIT_DEF))
1693  .addDef(ImpDef);
1694  unsigned InsSubDef = MRI.createVirtualRegister(DstRC);
1695  MachineInstr &InsSubMI = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
1696  TII.get(TargetOpcode::INSERT_SUBREG))
1697  .addDef(InsSubDef)
1698  .addUse(ImpDef)
1699  .addUse(I.getOperand(i).getReg())
1700  .addImm(SubregIdx);
1701  MachineInstr &InsEltMI =
1702  *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opc))
1703  .addDef(InsDef)
1704  .addUse(DstVec)
1705  .addImm(LaneIdx)
1706  .addUse(InsSubDef)
1707  .addImm(0);
1708  constrainSelectedInstRegOperands(ImpDefMI, TII, TRI, RBI);
1709  constrainSelectedInstRegOperands(InsSubMI, TII, TRI, RBI);
1710  constrainSelectedInstRegOperands(InsEltMI, TII, TRI, RBI);
1711  DstVec = InsDef;
1712  } else {
1713  MachineInstr &InsMI =
1714  *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opc))
1715  .addDef(InsDef)
1716  .addUse(DstVec)
1717  .addImm(LaneIdx)
1718  .addUse(I.getOperand(i).getReg());
1719  constrainSelectedInstRegOperands(InsMI, TII, TRI, RBI);
1720  DstVec = InsDef;
1721  }
1722  }
1723  I.eraseFromParent();
1724  return true;
1725 }
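// Illustrative sketch (not part of the source; vreg names made up): for a
// packed
//   %dst:fpr(<4 x s32>) = G_BUILD_VECTOR %a, %b, %c, %d
// with GPR-bank elements, the loop above emits one lane insert per trailing
// operand, reusing %dst for the final insert:
//   %v1:fpr128 = ... emitScalarToVector(%a) ...
//   %v2:fpr128 = INSvi32gpr %v1, 1, %b
//   %v3:fpr128 = INSvi32gpr %v2, 2, %c
//   %dst       = INSvi32gpr %v3, 3, %d
// FPR-bank elements take the IMPLICIT_DEF + INSERT_SUBREG path first so that
// INSvi32lane can read the scalar as lane 0 of a vector register.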
1726 
1727 /// SelectArithImmed - Select an immediate value that can be represented as
1728 /// a 12-bit value shifted left by either 0 or 12. If so, return the operand
1729 /// renderers for the 12-bit value and the shifter operand; otherwise None.
1730 InstructionSelector::ComplexRendererFns
1731 AArch64InstructionSelector::selectArithImmed(MachineOperand &Root) const {
1732  MachineInstr &MI = *Root.getParent();
1733  MachineBasicBlock &MBB = *MI.getParent();
1734  MachineFunction &MF = *MBB.getParent();
1735  MachineRegisterInfo &MRI = MF.getRegInfo();
1736 
1737  // This function is called from the addsub_shifted_imm ComplexPattern,
1738  // which lists [imm] as the list of opcodes it's interested in; however,
1739  // we still need to check whether the operand is actually an immediate
1740  // here, because the ComplexPattern opcode list is only used in
1741  // root-level opcode matching.
1742  uint64_t Immed;
1743  if (Root.isImm())
1744  Immed = Root.getImm();
1745  else if (Root.isCImm())
1746  Immed = Root.getCImm()->getZExtValue();
1747  else if (Root.isReg()) {
1748  MachineInstr *Def = MRI.getVRegDef(Root.getReg());
1749  if (Def->getOpcode() != TargetOpcode::G_CONSTANT)
1750  return None;
1751  MachineOperand &Op1 = Def->getOperand(1);
1752  if (!Op1.isCImm() || Op1.getCImm()->getBitWidth() > 64)
1753  return None;
1754  Immed = Op1.getCImm()->getZExtValue();
1755  } else
1756  return None;
1757 
1758  unsigned ShiftAmt;
1759 
1760  if (Immed >> 12 == 0) {
1761  ShiftAmt = 0;
1762  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
1763  ShiftAmt = 12;
1764  Immed = Immed >> 12;
1765  } else
1766  return None;
1767 
1768  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
1769  return {{
1770  [=](MachineInstrBuilder &MIB) { MIB.addImm(Immed); },
1771  [=](MachineInstrBuilder &MIB) { MIB.addImm(ShVal); },
1772  }};
1773 }
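// Worked example (illustrative, not from the source): 0xabc fits in 12 bits,
// so it renders as {imm = 0xabc, LSL #0}; 0xabc000 == 0xabc << 12, so it
// renders as {imm = 0xabc, LSL #12}; 0xabc001 satisfies neither form, so the
// renderer returns None and the constant must be materialized separately.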
1774 
1775 /// Select a "register plus unscaled signed 9-bit immediate" address. This
1776 /// should only match when there is an offset that is not valid for a scaled
1777 /// immediate addressing mode. The "Size" argument is the size in bytes of the
1778 /// memory reference, which is needed here to know what is valid for a scaled
1779 /// immediate.
1780 InstructionSelector::ComplexRendererFns
1781 AArch64InstructionSelector::selectAddrModeUnscaled(MachineOperand &Root,
1782  unsigned Size) const {
1783  MachineRegisterInfo &MRI =
1784  Root.getParent()->getParent()->getParent()->getRegInfo();
1785 
1786  if (!Root.isReg())
1787  return None;
1788 
1789  if (!isBaseWithConstantOffset(Root, MRI))
1790  return None;
1791 
1792  MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
1793  if (!RootDef)
1794  return None;
1795 
1796  MachineOperand &OffImm = RootDef->getOperand(2);
1797  if (!OffImm.isReg())
1798  return None;
1799  MachineInstr *RHS = MRI.getVRegDef(OffImm.getReg());
1800  if (!RHS || RHS->getOpcode() != TargetOpcode::G_CONSTANT)
1801  return None;
1802  int64_t RHSC;
1803  MachineOperand &RHSOp1 = RHS->getOperand(1);
1804  if (!RHSOp1.isCImm() || RHSOp1.getCImm()->getBitWidth() > 64)
1805  return None;
1806  RHSC = RHSOp1.getCImm()->getSExtValue();
1807 
1808  // If the offset is valid as a scaled immediate, don't match here.
1809  if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Log2_32(Size)))
1810  return None;
1811  if (RHSC >= -256 && RHSC < 256) {
1812  MachineOperand &Base = RootDef->getOperand(1);
1813  return {{
1814  [=](MachineInstrBuilder &MIB) { MIB.add(Base); },
1815  [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
1816  }};
1817  }
1818  return None;
1819 }
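// Worked example (illustrative, not from the source): for a 4-byte access
// with offset 2, the offset is not a multiple of the access size, so the
// scaled form cannot encode it; since 2 lies in the signed 9-bit window
// [-256, 256), this renderer matches and an unscaled-offset (LDUR-style)
// instruction can be used. An offset of 8, by contrast, is rejected here
// because it is representable as the scaled immediate 8 >> 2 == 2.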
1820 
1821 /// Select a "register plus scaled unsigned 12-bit immediate" address. The
1822 /// "Size" argument is the size in bytes of the memory reference, which
1823 /// determines the scale.
1824 InstructionSelector::ComplexRendererFns
1825 AArch64InstructionSelector::selectAddrModeIndexed(MachineOperand &Root,
1826  unsigned Size) const {
1827  MachineRegisterInfo &MRI =
1828  Root.getParent()->getParent()->getParent()->getRegInfo();
1829 
1830  if (!Root.isReg())
1831  return None;
1832 
1833  MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
1834  if (!RootDef)
1835  return None;
1836 
1837  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
1838  return {{
1839  [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
1840  [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
1841  }};
1842  }
1843 
1844  if (isBaseWithConstantOffset(Root, MRI)) {
1845  MachineOperand &LHS = RootDef->getOperand(1);
1846  MachineOperand &RHS = RootDef->getOperand(2);
1847  MachineInstr *LHSDef = MRI.getVRegDef(LHS.getReg());
1848  MachineInstr *RHSDef = MRI.getVRegDef(RHS.getReg());
1849  if (LHSDef && RHSDef) {
1850  int64_t RHSC = (int64_t)RHSDef->getOperand(1).getCImm()->getZExtValue();
1851  unsigned Scale = Log2_32(Size);
1852  if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
1853  if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
1854  return {{
1855  [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
1856  [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); },
1857  }};
1858 
1859  return {{
1860  [=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
1861  [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); },
1862  }};
1863  }
1864  }
1865  }
1866 
1867  // Before falling back to our general case, check if the unscaled
1868  // instructions can handle this. If so, that's preferable.
1869  if (selectAddrModeUnscaled(Root, Size).hasValue())
1870  return None;
1871 
1872  return {{
1873  [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
1874  [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
1875  }};
1876 }
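// Worked example (illustrative, not from the source): for a 4-byte access
// with base + 8, Scale == Log2_32(4) == 2; both (8 & 3) == 0 and
// 8 < (0x1000 << 2) hold, so the offset is rendered scaled as 8 >> 2 == 2
// (the #imm of an LDRWui-style access). A plain frame-index base renders as
// {fi, 0}, and offsets that only the unscaled form can encode are rejected
// so the unscaled pattern can match instead.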
1877 
1878 void AArch64InstructionSelector::renderTruncImm(MachineInstrBuilder &MIB,
1879  const MachineInstr &MI) const {
1880  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
1881  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
1882  Optional<int64_t> CstVal = getConstantVRegVal(MI.getOperand(0).getReg(), MRI);
1883  assert(CstVal && "Expected constant value");
1884  MIB.addImm(CstVal.getValue());
1885 }
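// Usage note (an assumption, not stated in the source): custom renderers like
// this one are referenced from the tblgen-erated patterns so that a matched
// G_CONSTANT can be emitted directly as an immediate operand of the selected
// instruction rather than being rematerialized through a register.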
1886 
1887 namespace llvm {
1888 InstructionSelector *
1889 createAArch64InstructionSelector(const AArch64TargetMachine &TM,
1890  AArch64Subtarget &Subtarget,
1891  AArch64RegisterBankInfo &RBI) {
1892  return new AArch64InstructionSelector(TM, Subtarget, RBI);
1893 }
1894 }
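// Usage note (an assumption, not stated in the source): the target's
// GlobalISel setup is expected to call this factory once and hand the
// selector to the InstructionSelect pass, which invokes select() on each
// generic MachineInstr that survives legalization and register bank
// selection.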