LLVM  8.0.1
AArch64LegalizerInfo.cpp
//===- AArch64LegalizerInfo.cpp ----------------------------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for
/// AArch64.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AArch64LegalizerInfo.h"
#include "AArch64Subtarget.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"

using namespace llvm;
using namespace LegalizeActions;
using namespace LegalityPredicates;

AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST) {
  using namespace TargetOpcode;
  const LLT p0 = LLT::pointer(0, 64);
  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT s128 = LLT::scalar(128);
  const LLT s256 = LLT::scalar(256);
  const LLT s512 = LLT::scalar(512);
  const LLT v16s8 = LLT::vector(16, 8);
  const LLT v8s8 = LLT::vector(8, 8);
  const LLT v4s8 = LLT::vector(4, 8);
  const LLT v8s16 = LLT::vector(8, 16);
  const LLT v4s16 = LLT::vector(4, 16);
  const LLT v2s16 = LLT::vector(2, 16);
  const LLT v2s32 = LLT::vector(2, 32);
  const LLT v4s32 = LLT::vector(4, 32);
  const LLT v2s64 = LLT::vector(2, 64);
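  // Note (not in the original source): these shorthands follow GlobalISel's
  // LLT notation. sN is an N-bit scalar, vMsN is a vector of M elements of
  // N bits each (e.g. v2s64 is <2 x s64>), and p0 is a pointer in address
  // space 0, which is 64 bits wide on AArch64.
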
  getActionDefinitionsBuilder(G_IMPLICIT_DEF)
      .legalFor({p0, s1, s8, s16, s32, s64, v2s64})
      .clampScalar(0, s1, s64)
      .widenScalarToNextPow2(0, 8)
      .fewerElementsIf(
          [=](const LegalityQuery &Query) {
            return Query.Types[0].isVector() &&
                   (Query.Types[0].getElementType() != s64 ||
                    Query.Types[0].getNumElements() != 2);
          },
          [=](const LegalityQuery &Query) {
            LLT EltTy = Query.Types[0].getElementType();
            if (EltTy == s64)
              return std::make_pair(0, LLT::vector(2, 64));
            return std::make_pair(0, EltTy);
          });
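  // Illustrative example (not in the original source): under the rule above,
  // an s17 G_IMPLICIT_DEF passes clampScalar unchanged (s1 <= s17 <= s64) and
  // is then widened by widenScalarToNextPow2 to s32, while an s3 def widens
  // to s8 because 8 is the minimum width passed to widenScalarToNextPow2.
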
  getActionDefinitionsBuilder(G_PHI)
      .legalFor({p0, s16, s32, s64})
      .clampScalar(0, s16, s64)
      .widenScalarToNextPow2(0);

  getActionDefinitionsBuilder(G_BSWAP)
      .legalFor({s32, s64})
      .clampScalar(0, s16, s64)
      .widenScalarToNextPow2(0);

  getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR, G_SHL})
      .legalFor({s32, s64, v2s32, v4s32, v2s64})
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0)
      .clampNumElements(0, v2s32, v4s32)
      .clampNumElements(0, v2s64, v2s64)
      .moreElementsToNextPow2(0);
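  // Illustrative example (not in the original source): for the rule above, a
  // G_ADD of <3 x s32> is padded by moreElementsToNextPow2 to the legal
  // <4 x s32>, while a G_ADD of <8 x s32> is split by clampNumElements into
  // two <4 x s32> operations.
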
  getActionDefinitionsBuilder(G_GEP)
      .legalFor({{p0, s64}})
      .clampScalar(1, s64, s64);

  getActionDefinitionsBuilder(G_PTR_MASK).legalFor({p0});

  getActionDefinitionsBuilder({G_LSHR, G_ASHR, G_SDIV, G_UDIV})
      .legalFor({s32, s64})
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0);

  getActionDefinitionsBuilder({G_SREM, G_UREM})
      .lowerFor({s1, s8, s16, s32, s64});
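  // Illustrative sketch (not in the original source): lowerFor expands the
  // remainder in terms of operations already handled above, roughly
  //   %q = G_SDIV %a, %b
  //   %t = G_MUL %q, %b
  //   %r = G_SUB %a, %t
  // so no native remainder instruction is needed.
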
  getActionDefinitionsBuilder({G_SMULO, G_UMULO})
      .lowerFor({{s64, s1}});

  getActionDefinitionsBuilder({G_SMULH, G_UMULH}).legalFor({s32, s64});

  getActionDefinitionsBuilder({G_UADDE, G_USUBE, G_SADDO, G_SSUBO})
      .legalFor({{s32, s1}, {s64, s1}});

  getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMA, G_FMUL, G_FDIV})
      .legalFor({s32, s64});

  getActionDefinitionsBuilder({G_FREM, G_FPOW}).libcallFor({s32, s64});

  getActionDefinitionsBuilder(G_FCEIL)
      // If we don't have full FP16 support, then widen s16 to s32 if we
      // encounter it.
      .widenScalarIf(
          [=, &ST](const LegalityQuery &Query) {
            return Query.Types[0] == s16 && !ST.hasFullFP16();
          },
          [=](const LegalityQuery &Query) { return std::make_pair(0, s32); })
      .legalFor({s16, s32, s64, v2s32, v4s32, v2s64});

  getActionDefinitionsBuilder(G_INSERT)
      .unsupportedIf([=](const LegalityQuery &Query) {
        return Query.Types[0].getSizeInBits() <= Query.Types[1].getSizeInBits();
      })
      .legalIf([=](const LegalityQuery &Query) {
        const LLT &Ty0 = Query.Types[0];
        const LLT &Ty1 = Query.Types[1];
        if (Ty0 != s32 && Ty0 != s64 && Ty0 != p0)
          return false;
        return isPowerOf2_32(Ty1.getSizeInBits()) &&
               (Ty1.getSizeInBits() == 1 || Ty1.getSizeInBits() >= 8);
      })
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0)
      .maxScalarIf(typeInSet(0, {s32}), 1, s16)
      .maxScalarIf(typeInSet(0, {s64}), 1, s32)
      .widenScalarToNextPow2(1);
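  // Illustrative example (not in the original source): under the G_INSERT
  // rule above,
  //   %d:s64 = G_INSERT %a:s64, %b:s16, 32
  // is legal (s16 is a power of 2, at least 8 bits, and narrower than s64),
  // whereas inserting an s64 value into an s32 destination is unsupported
  // because the destination must be strictly wider than the inserted value.
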
  getActionDefinitionsBuilder(G_EXTRACT)
      .unsupportedIf([=](const LegalityQuery &Query) {
        return Query.Types[0].getSizeInBits() >= Query.Types[1].getSizeInBits();
      })
      .legalIf([=](const LegalityQuery &Query) {
        const LLT &Ty0 = Query.Types[0];
        const LLT &Ty1 = Query.Types[1];
        if (Ty1 != s32 && Ty1 != s64)
          return false;
        if (Ty1 == p0)
          return true;
        return isPowerOf2_32(Ty0.getSizeInBits()) &&
               (Ty0.getSizeInBits() == 1 || Ty0.getSizeInBits() >= 8);
      })
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(1)
      .maxScalarIf(typeInSet(1, {s32}), 0, s16)
      .maxScalarIf(typeInSet(1, {s64}), 0, s32)
      .widenScalarToNextPow2(0);

  getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
      .legalForTypesWithMemSize({{s32, p0, 8},
                                 {s32, p0, 16},
                                 {s32, p0, 32},
                                 {s64, p0, 64},
                                 {p0, p0, 64},
                                 {v2s32, p0, 64}})
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0)
      // TODO: We could support sum-of-pow2's but the lowering code doesn't know
      // how to do that yet.
      .unsupportedIfMemSizeNotPow2()
      // Lower anything left over into G_*EXT and G_LOAD
      .lower();
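  // Illustrative example (not in the original source): the final .lower()
  // rewrites a remaining extending load such as
  //   %v:s64 = G_SEXTLOAD %p:p0 :: (load 2)
  // into a plain G_LOAD of the memory type followed by a G_SEXT, both of
  // which are covered by rules elsewhere in this file.
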
  getActionDefinitionsBuilder(G_LOAD)
      .legalForTypesWithMemSize({{s8, p0, 8},
                                 {s16, p0, 16},
                                 {s32, p0, 32},
                                 {s64, p0, 64},
                                 {p0, p0, 64},
                                 {v2s32, p0, 64}})
      // These extends are also legal
      .legalForTypesWithMemSize({{s32, p0, 8},
                                 {s32, p0, 16}})
      .clampScalar(0, s8, s64)
      .widenScalarToNextPow2(0)
      // TODO: We could support sum-of-pow2's but the lowering code doesn't know
      // how to do that yet.
      .unsupportedIfMemSizeNotPow2()
      // Lower any any-extending loads left into G_ANYEXT and G_LOAD
      .lowerIf([=](const LegalityQuery &Query) {
        return Query.Types[0].getSizeInBits() != Query.MMODescrs[0].SizeInBits;
      })
      .clampNumElements(0, v2s32, v2s32)
      .clampMaxNumElements(0, s64, 1);

  getActionDefinitionsBuilder(G_STORE)
      .legalForTypesWithMemSize({{s8, p0, 8},
                                 {s16, p0, 16},
                                 {s32, p0, 32},
                                 {s64, p0, 64},
                                 {p0, p0, 64},
                                 {v2s32, p0, 64}})
      .clampScalar(0, s8, s64)
      .widenScalarToNextPow2(0)
      // TODO: We could support sum-of-pow2's but the lowering code doesn't know
      // how to do that yet.
      .unsupportedIfMemSizeNotPow2()
      .lowerIf([=](const LegalityQuery &Query) {
        return Query.Types[0].isScalar() &&
               Query.Types[0].getSizeInBits() != Query.MMODescrs[0].SizeInBits;
      })
      .clampNumElements(0, v2s32, v2s32)
      .clampMaxNumElements(0, s64, 1);
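  // Illustrative example (not in the original source): the lowerIf above
  // catches truncating stores, e.g.
  //   G_STORE %v:s32, %p:p0 :: (store 1)
  // where the value width (32) differs from the memory size (8); lowering
  // rewrites this as a G_TRUNC to s8 followed by a plain s8 G_STORE.
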
  // Constants
  getActionDefinitionsBuilder(G_CONSTANT)
      .legalFor({p0, s32, s64})
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0);
  getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor({s32, s64})
      .clampScalar(0, s32, s64);

  getActionDefinitionsBuilder(G_ICMP)
      .legalFor({{s32, s32}, {s32, s64}, {s32, p0}})
      .clampScalar(0, s32, s32)
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(1);

  getActionDefinitionsBuilder(G_FCMP)
      .legalFor({{s32, s32}, {s32, s64}})
      .clampScalar(0, s32, s32)
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(1);

  // Extensions
  getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
      .legalForCartesianProduct({s8, s16, s32, s64}, {s1, s8, s16, s32});

  // FP conversions
  getActionDefinitionsBuilder(G_FPTRUNC).legalFor(
      {{s16, s32}, {s16, s64}, {s32, s64}});
  getActionDefinitionsBuilder(G_FPEXT).legalFor(
      {{s32, s16}, {s64, s16}, {s64, s32}});

  // Conversions
  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
      .legalForCartesianProduct({s32, s64})
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0)
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(1);

  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
      .legalForCartesianProduct({s32, s64})
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(1)
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0);

  // Control-flow
  getActionDefinitionsBuilder(G_BRCOND).legalFor({s1, s8, s16, s32});
  getActionDefinitionsBuilder(G_BRINDIRECT).legalFor({p0});

  // Select
  getActionDefinitionsBuilder(G_SELECT)
      .legalFor({{s32, s1}, {s64, s1}, {p0, s1}})
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0);

  // Pointer-handling
  getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0});
  getActionDefinitionsBuilder(G_GLOBAL_VALUE).legalFor({p0});

  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalForCartesianProduct({s1, s8, s16, s32, s64}, {p0})
      .maxScalar(0, s64)
      .widenScalarToNextPow2(0, /*Min*/ 8);

  getActionDefinitionsBuilder(G_INTTOPTR)
      .unsupportedIf([&](const LegalityQuery &Query) {
        return Query.Types[0].getSizeInBits() != Query.Types[1].getSizeInBits();
      })
      .legalFor({{p0, s64}});

  // Casts for 32 and 64-bit width type are just copies.
  // Same for 128-bit width type, except they are on the FPR bank.
  getActionDefinitionsBuilder(G_BITCAST)
      // FIXME: This is wrong since G_BITCAST is not allowed to change the
      // number of bits but it's what the previous code described and fixing
      // it breaks tests.
      .legalForCartesianProduct({s1, s8, s16, s32, s64, s128, v16s8, v8s8, v4s8,
                                 v8s16, v4s16, v2s16, v4s32, v2s32, v2s64});

  getActionDefinitionsBuilder(G_VASTART).legalFor({p0});

  // va_list must be a pointer, but most sized types are pretty easy to handle
  // as the destination.
  getActionDefinitionsBuilder(G_VAARG)
      .customForCartesianProduct({s8, s16, s32, s64, p0}, {p0})
      .clampScalar(0, s8, s64)
      .widenScalarToNextPow2(0, /*Min*/ 8);

  if (ST.hasLSE()) {
    getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG_WITH_SUCCESS)
        .lowerIf(all(
            typeInSet(0, {s8, s16, s32, s64}), typeIs(1, s1), typeIs(2, p0),
            atomicOrderingAtLeastOrStrongerThan(0, AtomicOrdering::Monotonic)));

    getActionDefinitionsBuilder(
        {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB, G_ATOMICRMW_AND,
         G_ATOMICRMW_OR, G_ATOMICRMW_XOR, G_ATOMICRMW_MIN, G_ATOMICRMW_MAX,
         G_ATOMICRMW_UMIN, G_ATOMICRMW_UMAX, G_ATOMIC_CMPXCHG})
        .legalIf(all(
            typeInSet(0, {s8, s16, s32, s64}), typeIs(1, p0),
            atomicOrderingAtLeastOrStrongerThan(0, AtomicOrdering::Monotonic)));
  }

  getActionDefinitionsBuilder(G_BLOCK_ADDR).legalFor({p0});

  // Merge/Unmerge
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;

    auto notValidElt = [](const LegalityQuery &Query, unsigned TypeIdx) {
      const LLT &Ty = Query.Types[TypeIdx];
      if (Ty.isVector()) {
        const LLT &EltTy = Ty.getElementType();
        if (EltTy.getSizeInBits() < 8 || EltTy.getSizeInBits() > 64)
          return true;
        if (!isPowerOf2_32(EltTy.getSizeInBits()))
          return true;
      }
      return false;
    };
    auto scalarize = [](const LegalityQuery &Query, unsigned TypeIdx) {
      const LLT &Ty = Query.Types[TypeIdx];
      return std::make_pair(TypeIdx, Ty.getElementType());
    };

    // FIXME: This rule is horrible, but specifies the same as what we had
    // before with the particularly strange definitions removed (e.g.
    // s8 = G_MERGE_VALUES s32, s32).
    // Part of the complexity comes from these ops being extremely flexible. For
    // example, you can build/decompose vectors with it, concatenate vectors,
    // etc. and in addition to this you can also bitcast with it at the same
    // time. We've been considering breaking it up into multiple ops to make it
    // more manageable throughout the backend.
    getActionDefinitionsBuilder(Op)
        // Break up vectors with weird elements into scalars
        .fewerElementsIf(
            [=](const LegalityQuery &Query) { return notValidElt(Query, 0); },
            [=](const LegalityQuery &Query) { return scalarize(Query, 0); })
        .fewerElementsIf(
            [=](const LegalityQuery &Query) { return notValidElt(Query, 1); },
            [=](const LegalityQuery &Query) { return scalarize(Query, 1); })
        // Clamp the big scalar to s8-s512 and make it either a power of 2, 192,
        // or 384.
        .clampScalar(BigTyIdx, s8, s512)
        .widenScalarIf(
            [=](const LegalityQuery &Query) {
              const LLT &Ty = Query.Types[BigTyIdx];
              return !isPowerOf2_32(Ty.getSizeInBits()) &&
                     Ty.getSizeInBits() % 64 != 0;
            },
            [=](const LegalityQuery &Query) {
              // Pick the next power of 2, or a multiple of 64 over 128.
              // Whichever is smaller.
              const LLT &Ty = Query.Types[BigTyIdx];
              unsigned NewSizeInBits =
                  1 << Log2_32_Ceil(Ty.getSizeInBits() + 1);
              if (NewSizeInBits >= 256) {
                unsigned RoundedTo = alignTo<64>(Ty.getSizeInBits() + 1);
                if (RoundedTo < NewSizeInBits)
                  NewSizeInBits = RoundedTo;
              }
              return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
            })
        // Clamp the little scalar to s8-s256 and make it a power of 2. It's not
        // worth considering the multiples of 64 since 2*192 and 2*384 are not
        // valid.
        .clampScalar(LitTyIdx, s8, s256)
        .widenScalarToNextPow2(LitTyIdx, /*Min*/ 8)
        // So at this point, we have s8, s16, s32, s64, s128, s192, s256, s384,
        // s512, <X x s8>, <X x s16>, <X x s32>, or <X x s64>.
        // At this point it's simple enough to accept the legal types.
        .legalIf([=](const LegalityQuery &Query) {
          const LLT &BigTy = Query.Types[BigTyIdx];
          const LLT &LitTy = Query.Types[LitTyIdx];
          if (BigTy.isVector() && BigTy.getSizeInBits() < 32)
            return false;
          if (LitTy.isVector() && LitTy.getSizeInBits() < 32)
            return false;
          return BigTy.getSizeInBits() % LitTy.getSizeInBits() == 0;
        })
        // Any vectors left are the wrong size. Scalarize them.
        .fewerElementsIf([](const LegalityQuery &Query) { return true; },
                         [](const LegalityQuery &Query) {
                           return std::make_pair(
                               0, Query.Types[0].getElementType());
                         })
        .fewerElementsIf([](const LegalityQuery &Query) { return true; },
                         [](const LegalityQuery &Query) {
                           return std::make_pair(
                               1, Query.Types[1].getElementType());
                         });
  }
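  // Illustrative example (not in the original source): the widenScalarIf
  // above widens an s96 operand to s128 (the next power of 2, which is still
  // below 256), but an s130 operand to s192: once the next power of 2 reaches
  // 256, the next multiple of 64 (alignTo<64>(131) == 192) is preferred when
  // it is smaller than the next power of 2 (256).
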
  getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
      .unsupportedIf([=](const LegalityQuery &Query) {
        const LLT &EltTy = Query.Types[1].getElementType();
        return Query.Types[0] != EltTy;
      })
      .minScalar(2, s64)
      .legalIf([=](const LegalityQuery &Query) {
        const LLT &VecTy = Query.Types[1];
        return VecTy == v4s32 || VecTy == v2s64;
      });

  getActionDefinitionsBuilder(G_BUILD_VECTOR)
      .legalFor({{v4s32, s32}, {v2s64, s64}})
      .clampNumElements(0, v4s32, v4s32)
      .clampNumElements(0, v2s64, v2s64)

      // Deal with larger scalar types, which will be implicitly truncated.
      .legalIf([=](const LegalityQuery &Query) {
        return Query.Types[0].getScalarSizeInBits() <
               Query.Types[1].getSizeInBits();
      })
      .minScalarSameAs(1, 0);

  computeTables();
  verify(*ST.getInstrInfo());
}

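// Illustrative note (not in the original source): G_VAARG is the only opcode
// this file marks as Custom, so it is the only case that reaches
// legalizeCustom during legalization; returning false reports that the
// instruction could not be legalized.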
bool AArch64LegalizerInfo::legalizeCustom(MachineInstr &MI,
                                          MachineRegisterInfo &MRI,
                                          MachineIRBuilder &MIRBuilder,
                                          GISelChangeObserver &Observer) const {
  switch (MI.getOpcode()) {
  default:
    // No idea what to do.
    return false;
  case TargetOpcode::G_VAARG:
    return legalizeVaArg(MI, MRI, MIRBuilder);
  }

  llvm_unreachable("expected switch to return");
}

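// Illustrative walk-through (not in the original source): for a va_arg of an
// s64 value with the natural 8-byte alignment (so Align <= PtrSize == 8), the
// expansion below loads the current va_list pointer, loads the argument from
// it, advances the pointer by alignTo(8, 8) == 8 bytes, and stores it back.
// The realignment branch (buildGEP plus buildPtrMask) is only emitted when
// the requested alignment exceeds the pointer size, e.g. for a 16-byte
// aligned argument.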
bool AArch64LegalizerInfo::legalizeVaArg(MachineInstr &MI,
                                         MachineRegisterInfo &MRI,
                                         MachineIRBuilder &MIRBuilder) const {
  MIRBuilder.setInstr(MI);
  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Align = MI.getOperand(2).getImm();
  unsigned Dst = MI.getOperand(0).getReg();
  unsigned ListPtr = MI.getOperand(1).getReg();

  LLT PtrTy = MRI.getType(ListPtr);
  LLT IntPtrTy = LLT::scalar(PtrTy.getSizeInBits());

  const unsigned PtrSize = PtrTy.getSizeInBits() / 8;
  unsigned List = MRI.createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildLoad(
      List, ListPtr,
      *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOLoad,
                               PtrSize, /* Align = */ PtrSize));

  unsigned DstPtr;
  if (Align > PtrSize) {
    // Realign the list to the actual required alignment.
    auto AlignMinus1 = MIRBuilder.buildConstant(IntPtrTy, Align - 1);

    unsigned ListTmp = MRI.createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildGEP(ListTmp, List, AlignMinus1->getOperand(0).getReg());

    DstPtr = MRI.createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildPtrMask(DstPtr, ListTmp, Log2_64(Align));
  } else
    DstPtr = List;

  uint64_t ValSize = MRI.getType(Dst).getSizeInBits() / 8;
  MIRBuilder.buildLoad(
      Dst, DstPtr,
      *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOLoad,
                               ValSize, std::max(Align, PtrSize)));

  unsigned SizeReg = MRI.createGenericVirtualRegister(IntPtrTy);
  MIRBuilder.buildConstant(SizeReg, alignTo(ValSize, PtrSize));

  unsigned NewList = MRI.createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildGEP(NewList, DstPtr, SizeReg);

  MIRBuilder.buildStore(
      NewList, ListPtr,
      *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOStore,
                               PtrSize, /* Align = */ PtrSize));

  MI.eraseFromParent();
  return true;
}