LLVM  8.0.1
HexagonISelLowering.cpp
Go to the documentation of this file.
1 //===-- HexagonISelLowering.cpp - Hexagon DAG Lowering Implementation -----===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the interfaces that Hexagon uses to lower LLVM code
11 // into a selection DAG.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "HexagonISelLowering.h"
16 #include "Hexagon.h"
18 #include "HexagonRegisterInfo.h"
19 #include "HexagonSubtarget.h"
20 #include "HexagonTargetMachine.h"
22 #include "llvm/ADT/APInt.h"
23 #include "llvm/ADT/ArrayRef.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/ADT/StringSwitch.h"
35 #include "llvm/IR/BasicBlock.h"
36 #include "llvm/IR/CallingConv.h"
37 #include "llvm/IR/DataLayout.h"
38 #include "llvm/IR/DerivedTypes.h"
39 #include "llvm/IR/Function.h"
40 #include "llvm/IR/GlobalValue.h"
41 #include "llvm/IR/InlineAsm.h"
42 #include "llvm/IR/Instructions.h"
43 #include "llvm/IR/Intrinsics.h"
44 #include "llvm/IR/IntrinsicInst.h"
45 #include "llvm/IR/Module.h"
46 #include "llvm/IR/Type.h"
47 #include "llvm/IR/Value.h"
48 #include "llvm/MC/MCRegisterInfo.h"
49 #include "llvm/Support/Casting.h"
50 #include "llvm/Support/CodeGen.h"
52 #include "llvm/Support/Debug.h"
57 #include <algorithm>
58 #include <cassert>
59 #include <cstddef>
60 #include <cstdint>
61 #include <limits>
62 #include <utility>
63 
64 using namespace llvm;
65 
66 #define DEBUG_TYPE "hexagon-lowering"
67 
68 static cl::opt<bool> EmitJumpTables("hexagon-emit-jump-tables",
69  cl::init(true), cl::Hidden,
70  cl::desc("Control jump table emission on Hexagon target"));
71 
72 static cl::opt<bool> EnableHexSDNodeSched("enable-hexagon-sdnode-sched",
74  cl::desc("Enable Hexagon SDNode scheduling"));
75 
76 static cl::opt<bool> EnableFastMath("ffast-math",
78  cl::desc("Enable Fast Math processing"));
79 
80 static cl::opt<int> MinimumJumpTables("minimum-jump-tables",
82  cl::desc("Set minimum jump tables"));
83 
84 static cl::opt<int> MaxStoresPerMemcpyCL("max-store-memcpy",
86  cl::desc("Max #stores to inline memcpy"));
87 
88 static cl::opt<int> MaxStoresPerMemcpyOptSizeCL("max-store-memcpy-Os",
90  cl::desc("Max #stores to inline memcpy"));
91 
92 static cl::opt<int> MaxStoresPerMemmoveCL("max-store-memmove",
94  cl::desc("Max #stores to inline memmove"));
95 
96 static cl::opt<int> MaxStoresPerMemmoveOptSizeCL("max-store-memmove-Os",
98  cl::desc("Max #stores to inline memmove"));
99 
100 static cl::opt<int> MaxStoresPerMemsetCL("max-store-memset",
102  cl::desc("Max #stores to inline memset"));
103 
104 static cl::opt<int> MaxStoresPerMemsetOptSizeCL("max-store-memset-Os",
106  cl::desc("Max #stores to inline memset"));
107 
108 static cl::opt<bool> AlignLoads("hexagon-align-loads",
109  cl::Hidden, cl::init(false),
110  cl::desc("Rewrite unaligned loads as a pair of aligned loads"));
111 
112 
113 namespace {
114 
115  class HexagonCCState : public CCState {
116  unsigned NumNamedVarArgParams = 0;
117 
118  public:
119  HexagonCCState(CallingConv::ID CC, bool IsVarArg, MachineFunction &MF,
121  unsigned NumNamedArgs)
122  : CCState(CC, IsVarArg, MF, locs, C),
123  NumNamedVarArgParams(NumNamedArgs) {}
124  unsigned getNumNamedVarArgParams() const { return NumNamedVarArgParams; }
125  };
126 
127 } // end anonymous namespace
128 
129 
130 // Implement calling convention for Hexagon.
131 
132 static bool CC_SkipOdd(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
133  CCValAssign::LocInfo &LocInfo,
134  ISD::ArgFlagsTy &ArgFlags, CCState &State) {
135  static const MCPhysReg ArgRegs[] = {
136  Hexagon::R0, Hexagon::R1, Hexagon::R2,
137  Hexagon::R3, Hexagon::R4, Hexagon::R5
138  };
139  const unsigned NumArgRegs = array_lengthof(ArgRegs);
140  unsigned RegNum = State.getFirstUnallocated(ArgRegs);
141 
142  // RegNum is an index into ArgRegs: skip a register if RegNum is odd.
143  if (RegNum != NumArgRegs && RegNum % 2 == 1)
144  State.AllocateReg(ArgRegs[RegNum]);
145 
146  // Always return false here, as this function only makes sure that the first
147  // unallocated register has an even register number and does not actually
148  // allocate a register for the current argument.
149  return false;
150 }
151 
152 #include "HexagonGenCallingConv.inc"
153 
154 
155 SDValue
157  const {
158  return SDValue();
159 }
160 
161 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
162 /// by "Src" to address "Dst" of size "Size". Alignment information is
163 /// specified by the specific parameter attribute. The copy will be passed as
164 /// a byval function parameter. Sometimes what we are copying is the end of a
165 /// larger object, the part that does not fit in registers.
167  SDValue Chain, ISD::ArgFlagsTy Flags,
168  SelectionDAG &DAG, const SDLoc &dl) {
169  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
170  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
171  /*isVolatile=*/false, /*AlwaysInline=*/false,
172  /*isTailCall=*/false,
174 }
175 
176 bool
178  CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
180  LLVMContext &Context) const {
182  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
183 
185  return CCInfo.CheckReturn(Outs, RetCC_Hexagon_HVX);
186  return CCInfo.CheckReturn(Outs, RetCC_Hexagon);
187 }
188 
189 // LowerReturn - Lower ISD::RET. If a struct is larger than 8 bytes and is
190 // passed by value, the function prototype is modified to return void and
191 // the value is stored in memory pointed by a pointer passed by caller.
192 SDValue
194  bool IsVarArg,
196  const SmallVectorImpl<SDValue> &OutVals,
197  const SDLoc &dl, SelectionDAG &DAG) const {
198  // CCValAssign - represent the assignment of the return value to locations.
200 
201  // CCState - Info about the registers and stack slot.
202  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
203  *DAG.getContext());
204 
205  // Analyze return values of ISD::RET
206  if (Subtarget.useHVXOps())
207  CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon_HVX);
208  else
209  CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon);
210 
211  SDValue Flag;
212  SmallVector<SDValue, 4> RetOps(1, Chain);
213 
214  // Copy the result values into the output registers.
215  for (unsigned i = 0; i != RVLocs.size(); ++i) {
216  CCValAssign &VA = RVLocs[i];
217 
218  Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
219 
220  // Guarantee that all emitted copies are stuck together with flags.
221  Flag = Chain.getValue(1);
222  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
223  }
224 
225  RetOps[0] = Chain; // Update chain.
226 
227  // Add the flag if we have it.
228  if (Flag.getNode())
229  RetOps.push_back(Flag);
230 
231  return DAG.getNode(HexagonISD::RET_FLAG, dl, MVT::Other, RetOps);
232 }
233 
// Returns true when this call site may be emitted as a tail call: the call
// instruction must be marked as a tail call, and the enclosing function must
// not carry the "disable-tail-calls"="true" attribute.
// NOTE(review): the defining signature line (scrape line 234) is missing from
// this copy — presumably `bool HexagonTargetLowering::mayBeEmittedAsTailCall(
// CallInst *CI) const {`; confirm against the upstream LLVM 8.0.1 source.
 235  // If either no tail call or told not to tail call at all, don't.
 236  auto Attr =
 237  CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
 238  if (!CI->isTailCall() || Attr.getValueAsString() == "true")
 239  return false;
 240 
 241  return true;
 242 }
243 
244 unsigned HexagonTargetLowering::getRegisterByName(const char* RegName, EVT VT,
245  SelectionDAG &DAG) const {
246  // Just support r19, the linux kernel uses it.
247  unsigned Reg = StringSwitch<unsigned>(RegName)
248  .Case("r19", Hexagon::R19)
249  .Default(0);
250  if (Reg)
251  return Reg;
252 
253  report_fatal_error("Invalid register name global variable");
254 }
255 
256 /// LowerCallResult - Lower the result values of an ISD::CALL into the
257 /// appropriate copies out of appropriate physical registers. This assumes that
258 /// Chain/Glue are the input chain/glue to use, and that TheCall is the call
259 /// being lowered. Returns a SDNode with the same number of values as the
260 /// ISD::CALL.
262  SDValue Chain, SDValue Glue, CallingConv::ID CallConv, bool IsVarArg,
263  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
265  const SmallVectorImpl<SDValue> &OutVals, SDValue Callee) const {
266  // Assign locations to each value returned by this call.
268 
269  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
270  *DAG.getContext());
271 
272  if (Subtarget.useHVXOps())
273  CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon_HVX);
274  else
275  CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon);
276 
277  // Copy all of the result registers out of their specified physreg.
278  for (unsigned i = 0; i != RVLocs.size(); ++i) {
279  SDValue RetVal;
280  if (RVLocs[i].getValVT() == MVT::i1) {
281  // Return values of type MVT::i1 require special handling. The reason
282  // is that MVT::i1 is associated with the PredRegs register class, but
283  // values of that type are still returned in R0. Generate an explicit
284  // copy into a predicate register from R0, and treat the value of the
285  // predicate register as the call result.
286  auto &MRI = DAG.getMachineFunction().getRegInfo();
287  SDValue FR0 = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
288  MVT::i32, Glue);
289  // FR0 = (Value, Chain, Glue)
290  unsigned PredR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
291  SDValue TPR = DAG.getCopyToReg(FR0.getValue(1), dl, PredR,
292  FR0.getValue(0), FR0.getValue(2));
293  // TPR = (Chain, Glue)
294  // Don't glue this CopyFromReg, because it copies from a virtual
295  // register. If it is glued to the call, InstrEmitter will add it
296  // as an implicit def to the call (EmitMachineNode).
297  RetVal = DAG.getCopyFromReg(TPR.getValue(0), dl, PredR, MVT::i1);
298  Glue = TPR.getValue(1);
299  Chain = TPR.getValue(0);
300  } else {
301  RetVal = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
302  RVLocs[i].getValVT(), Glue);
303  Glue = RetVal.getValue(2);
304  Chain = RetVal.getValue(1);
305  }
306  InVals.push_back(RetVal.getValue(0));
307  }
308 
309  return Chain;
310 }
311 
312 /// LowerCall - Functions arguments are copied from virtual regs to
313 /// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
314 SDValue
316  SmallVectorImpl<SDValue> &InVals) const {
317  SelectionDAG &DAG = CLI.DAG;
318  SDLoc &dl = CLI.DL;
320  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
322  SDValue Chain = CLI.Chain;
323  SDValue Callee = CLI.Callee;
324  CallingConv::ID CallConv = CLI.CallConv;
325  bool IsVarArg = CLI.IsVarArg;
326  bool DoesNotReturn = CLI.DoesNotReturn;
327 
328  bool IsStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
330  MachineFrameInfo &MFI = MF.getFrameInfo();
331  auto PtrVT = getPointerTy(MF.getDataLayout());
332 
333  unsigned NumParams = CLI.CS.getInstruction()
334  ? CLI.CS.getFunctionType()->getNumParams()
335  : 0;
336  if (GlobalAddressSDNode *GAN = dyn_cast<GlobalAddressSDNode>(Callee))
337  Callee = DAG.getTargetGlobalAddress(GAN->getGlobal(), dl, MVT::i32);
338 
339  // Analyze operands of the call, assigning locations to each operand.
341  HexagonCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext(),
342  NumParams);
343 
344  if (Subtarget.useHVXOps())
345  CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_HVX);
346  else
347  CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);
348 
349  auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls");
350  if (Attr.getValueAsString() == "true")
351  CLI.IsTailCall = false;
352 
353  if (CLI.IsTailCall) {
354  bool StructAttrFlag = MF.getFunction().hasStructRetAttr();
355  CLI.IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
356  IsVarArg, IsStructRet, StructAttrFlag, Outs,
357  OutVals, Ins, DAG);
358  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
359  CCValAssign &VA = ArgLocs[i];
360  if (VA.isMemLoc()) {
361  CLI.IsTailCall = false;
362  break;
363  }
364  }
365  LLVM_DEBUG(dbgs() << (CLI.IsTailCall ? "Eligible for Tail Call\n"
366  : "Argument must be passed on stack. "
367  "Not eligible for Tail Call\n"));
368  }
369  // Get a count of how many bytes are to be pushed on the stack.
370  unsigned NumBytes = CCInfo.getNextStackOffset();
372  SmallVector<SDValue, 8> MemOpChains;
373 
374  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
375  SDValue StackPtr =
376  DAG.getCopyFromReg(Chain, dl, HRI.getStackRegister(), PtrVT);
377 
378  bool NeedsArgAlign = false;
379  unsigned LargestAlignSeen = 0;
380  // Walk the register/memloc assignments, inserting copies/loads.
381  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
382  CCValAssign &VA = ArgLocs[i];
383  SDValue Arg = OutVals[i];
384  ISD::ArgFlagsTy Flags = Outs[i].Flags;
385  // Record if we need > 8 byte alignment on an argument.
386  bool ArgAlign = Subtarget.isHVXVectorType(VA.getValVT());
387  NeedsArgAlign |= ArgAlign;
388 
389  // Promote the value if needed.
390  switch (VA.getLocInfo()) {
391  default:
392  // Loc info must be one of Full, BCvt, SExt, ZExt, or AExt.
393  llvm_unreachable("Unknown loc info!");
394  case CCValAssign::Full:
395  break;
396  case CCValAssign::BCvt:
397  Arg = DAG.getBitcast(VA.getLocVT(), Arg);
398  break;
399  case CCValAssign::SExt:
400  Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
401  break;
402  case CCValAssign::ZExt:
403  Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
404  break;
405  case CCValAssign::AExt:
406  Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
407  break;
408  }
409 
410  if (VA.isMemLoc()) {
411  unsigned LocMemOffset = VA.getLocMemOffset();
412  SDValue MemAddr = DAG.getConstant(LocMemOffset, dl,
413  StackPtr.getValueType());
414  MemAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, MemAddr);
415  if (ArgAlign)
416  LargestAlignSeen = std::max(LargestAlignSeen,
417  VA.getLocVT().getStoreSizeInBits() >> 3);
418  if (Flags.isByVal()) {
419  // The argument is a struct passed by value. According to LLVM, "Arg"
420  // is a pointer.
421  MemOpChains.push_back(CreateCopyOfByValArgument(Arg, MemAddr, Chain,
422  Flags, DAG, dl));
423  } else {
425  DAG.getMachineFunction(), LocMemOffset);
426  SDValue S = DAG.getStore(Chain, dl, Arg, MemAddr, LocPI);
427  MemOpChains.push_back(S);
428  }
429  continue;
430  }
431 
432  // Arguments that can be passed on register must be kept at RegsToPass
433  // vector.
434  if (VA.isRegLoc())
435  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
436  }
437 
438  if (NeedsArgAlign && Subtarget.hasV60Ops()) {
439  LLVM_DEBUG(dbgs() << "Function needs byte stack align due to call args\n");
440  unsigned VecAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass);
441  LargestAlignSeen = std::max(LargestAlignSeen, VecAlign);
442  MFI.ensureMaxAlignment(LargestAlignSeen);
443  }
444  // Transform all store nodes into one single node because all store
445  // nodes are independent of each other.
446  if (!MemOpChains.empty())
447  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
448 
449  SDValue Glue;
450  if (!CLI.IsTailCall) {
451  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
452  Glue = Chain.getValue(1);
453  }
454 
455  // Build a sequence of copy-to-reg nodes chained together with token
456  // chain and flag operands which copy the outgoing args into registers.
457  // The Glue is necessary since all emitted instructions must be
458  // stuck together.
459  if (!CLI.IsTailCall) {
460  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
461  Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
462  RegsToPass[i].second, Glue);
463  Glue = Chain.getValue(1);
464  }
465  } else {
466  // For tail calls lower the arguments to the 'real' stack slot.
467  //
468  // Force all the incoming stack arguments to be loaded from the stack
469  // before any new outgoing arguments are stored to the stack, because the
470  // outgoing stack slots may alias the incoming argument stack slots, and
471  // the alias isn't otherwise explicit. This is slightly more conservative
472  // than necessary, because it means that each store effectively depends
473  // on every argument instead of just those arguments it would clobber.
474  //
475  // Do not flag preceding copytoreg stuff together with the following stuff.
476  Glue = SDValue();
477  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
478  Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
479  RegsToPass[i].second, Glue);
480  Glue = Chain.getValue(1);
481  }
482  Glue = SDValue();
483  }
484 
485  bool LongCalls = MF.getSubtarget<HexagonSubtarget>().useLongCalls();
486  unsigned Flags = LongCalls ? HexagonII::HMOTF_ConstExtended : 0;
487 
488  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
489  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
490  // node so that legalize doesn't hack it.
491  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
492  Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, PtrVT, 0, Flags);
493  } else if (ExternalSymbolSDNode *S =
494  dyn_cast<ExternalSymbolSDNode>(Callee)) {
495  Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, Flags);
496  }
497 
498  // Returns a chain & a flag for retval copy to use.
499  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
501  Ops.push_back(Chain);
502  Ops.push_back(Callee);
503 
504  // Add argument registers to the end of the list so that they are
505  // known live into the call.
506  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
507  Ops.push_back(DAG.getRegister(RegsToPass[i].first,
508  RegsToPass[i].second.getValueType()));
509  }
510 
511  const uint32_t *Mask = HRI.getCallPreservedMask(MF, CallConv);
512  assert(Mask && "Missing call preserved mask for calling convention");
513  Ops.push_back(DAG.getRegisterMask(Mask));
514 
515  if (Glue.getNode())
516  Ops.push_back(Glue);
517 
518  if (CLI.IsTailCall) {
519  MFI.setHasTailCall();
520  return DAG.getNode(HexagonISD::TC_RETURN, dl, NodeTys, Ops);
521  }
522 
523  // Set this here because we need to know this for "hasFP" in frame lowering.
524  // The target-independent code calls getFrameRegister before setting it, and
525  // getFrameRegister uses hasFP to determine whether the function has FP.
526  MFI.setHasCalls(true);
527 
528  unsigned OpCode = DoesNotReturn ? HexagonISD::CALLnr : HexagonISD::CALL;
529  Chain = DAG.getNode(OpCode, dl, NodeTys, Ops);
530  Glue = Chain.getValue(1);
531 
532  // Create the CALLSEQ_END node.
533  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
534  DAG.getIntPtrConstant(0, dl, true), Glue, dl);
535  Glue = Chain.getValue(1);
536 
537  // Handle result values, copying them out of physregs into vregs that we
538  // return.
539  return LowerCallResult(Chain, Glue, CallConv, IsVarArg, Ins, dl, DAG,
540  InVals, OutVals, Callee);
541 }
542 
543 /// Returns true by value, base pointer and offset pointer and addressing
544 /// mode by reference if this node can be combined with a load / store to
545 /// form a post-indexed load / store.
548  SelectionDAG &DAG) const {
550  if (!LSN)
551  return false;
552  EVT VT = LSN->getMemoryVT();
553  if (!VT.isSimple())
554  return false;
555  bool IsLegalType = VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
556  VT == MVT::i64 || VT == MVT::f32 || VT == MVT::f64 ||
557  VT == MVT::v2i16 || VT == MVT::v2i32 || VT == MVT::v4i8 ||
558  VT == MVT::v4i16 || VT == MVT::v8i8 ||
559  Subtarget.isHVXVectorType(VT.getSimpleVT());
560  if (!IsLegalType)
561  return false;
562 
563  if (Op->getOpcode() != ISD::ADD)
564  return false;
565  Base = Op->getOperand(0);
566  Offset = Op->getOperand(1);
567  if (!isa<ConstantSDNode>(Offset.getNode()))
568  return false;
569  AM = ISD::POST_INC;
570 
571  int32_t V = cast<ConstantSDNode>(Offset.getNode())->getSExtValue();
572  return Subtarget.getInstrInfo()->isValidAutoIncImm(VT, V);
573 }
574 
575 SDValue
578  auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
579  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
580  unsigned LR = HRI.getRARegister();
581 
582  if (Op.getOpcode() != ISD::INLINEASM || HMFI.hasClobberLR())
583  return Op;
584 
585  unsigned NumOps = Op.getNumOperands();
586  if (Op.getOperand(NumOps-1).getValueType() == MVT::Glue)
587  --NumOps; // Ignore the flag operand.
588 
589  for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
590  unsigned Flags = cast<ConstantSDNode>(Op.getOperand(i))->getZExtValue();
591  unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
592  ++i; // Skip the ID value.
593 
594  switch (InlineAsm::getKind(Flags)) {
595  default:
596  llvm_unreachable("Bad flags!");
598  case InlineAsm::Kind_Imm:
599  case InlineAsm::Kind_Mem:
600  i += NumVals;
601  break;
605  for (; NumVals; --NumVals, ++i) {
606  unsigned Reg = cast<RegisterSDNode>(Op.getOperand(i))->getReg();
607  if (Reg != LR)
608  continue;
609  HMFI.setHasClobberLR(true);
610  return Op;
611  }
612  break;
613  }
614  }
615  }
616 
617  return Op;
618 }
619 
620 // Need to transform ISD::PREFETCH into something that doesn't inherit
621 // all of the properties of ISD::PREFETCH, specifically SDNPMayLoad and
622 // SDNPMayStore.
// NOTE(review): the signature line (scrape line 623) is missing here —
// presumably `SDValue HexagonTargetLowering::LowerPREFETCH(SDValue Op,`;
// confirm against the upstream LLVM 8.0.1 source.
 624  SelectionDAG &DAG) const {
// Only the chain (operand 0) and address (operand 1) are consumed; the
// remaining ISD::PREFETCH hint operands are dropped by this lowering.
 625  SDValue Chain = Op.getOperand(0);
 626  SDValue Addr = Op.getOperand(1);
 627  // Lower it to DCFETCH($reg, #0). A "pat" will try to merge the offset in,
 628  // if the "reg" is fed by an "add".
 629  SDLoc DL(Op);
 630  SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
 631  return DAG.getNode(HexagonISD::DCFETCH, DL, MVT::Other, Chain, Addr, Zero);
 632 }
633 
634 // Custom-handle ISD::READCYCLECOUNTER because the target-independent SDNode
635 // is marked as having side-effects, while the register read on Hexagon does
636 // not have any. TableGen refuses to accept the direct pattern from that node
637 // to the A4_tfrcpp.
// NOTE(review): two lines are missing from this copy: the signature (scrape
// line 638, presumably `SDValue HexagonTargetLowering::LowerREADCYCLECOUNTER(
// SDValue Op,`) and the declaration of `VTs` used below (scrape line 642,
// presumably an SDVTList built via DAG.getVTList). Confirm against upstream.
 639  SelectionDAG &DAG) const {
 640  SDValue Chain = Op.getOperand(0);
 641  SDLoc dl(Op);
// Emit the Hexagon-specific READCYCLE node, threading the incoming chain.
 643  return DAG.getNode(HexagonISD::READCYCLE, dl, VTs, Chain);
 644 }
645 
// Lower ISD::INTRINSIC_VOID nodes. Only Intrinsic::hexagon_prefetch is
// handled (turned into a DCFETCH with a zero immediate offset, as in
// LowerPREFETCH); all other intrinsics fall through by returning an empty
// SDValue so that default lowering applies.
// NOTE(review): the signature line (scrape line 646) is missing here —
// presumably `SDValue HexagonTargetLowering::LowerINTRINSIC_VOID(SDValue Op,`;
// confirm against the upstream LLVM 8.0.1 source.
 647  SelectionDAG &DAG) const {
 648  SDValue Chain = Op.getOperand(0);
// Operand 1 of INTRINSIC_VOID is the intrinsic ID constant.
 649  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
 650  // Lower the hexagon_prefetch builtin to DCFETCH, as above.
 651  if (IntNo == Intrinsic::hexagon_prefetch) {
 652  SDValue Addr = Op.getOperand(2);
 653  SDLoc DL(Op);
 654  SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
 655  return DAG.getNode(HexagonISD::DCFETCH, DL, MVT::Other, Chain, Addr, Zero);
 656  }
 657  return SDValue();
 658 }
659 
660 SDValue
662  SelectionDAG &DAG) const {
663  SDValue Chain = Op.getOperand(0);
664  SDValue Size = Op.getOperand(1);
665  SDValue Align = Op.getOperand(2);
666  SDLoc dl(Op);
667 
669  assert(AlignConst && "Non-constant Align in LowerDYNAMIC_STACKALLOC");
670 
671  unsigned A = AlignConst->getSExtValue();
672  auto &HFI = *Subtarget.getFrameLowering();
673  // "Zero" means natural stack alignment.
674  if (A == 0)
675  A = HFI.getStackAlignment();
676 
677  LLVM_DEBUG({
678  dbgs () << __func__ << " Align: " << A << " Size: ";
679  Size.getNode()->dump(&DAG);
680  dbgs() << "\n";
681  });
682 
683  SDValue AC = DAG.getConstant(A, dl, MVT::i32);
685  SDValue AA = DAG.getNode(HexagonISD::ALLOCA, dl, VTs, Chain, Size, AC);
686 
687  DAG.ReplaceAllUsesOfValueWith(Op, AA);
688  return AA;
689 }
690 
692  SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
693  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
694  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
696  MachineFrameInfo &MFI = MF.getFrameInfo();
698 
699  // Assign locations to all of the incoming arguments.
701  HexagonCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext(),
703 
704  if (Subtarget.useHVXOps())
705  CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon_HVX);
706  else
707  CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon);
708 
709  // For LLVM, in the case when returning a struct by value (>8byte),
710  // the first argument is a pointer that points to the location on caller's
711  // stack where the return value will be stored. For Hexagon, the location on
712  // caller's stack is passed only when the struct size is smaller than (and
713  // equal to) 8 bytes. If not, no address will be passed into callee and
714  // callee return the result direclty through R0/R1.
715 
716  auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
717 
718  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
719  CCValAssign &VA = ArgLocs[i];
720  ISD::ArgFlagsTy Flags = Ins[i].Flags;
721  bool ByVal = Flags.isByVal();
722 
723  // Arguments passed in registers:
724  // 1. 32- and 64-bit values and HVX vectors are passed directly,
725  // 2. Large structs are passed via an address, and the address is
726  // passed in a register.
727  if (VA.isRegLoc() && ByVal && Flags.getByValSize() <= 8)
728  llvm_unreachable("ByValSize must be bigger than 8 bytes");
729 
730  bool InReg = VA.isRegLoc() &&
731  (!ByVal || (ByVal && Flags.getByValSize() > 8));
732 
733  if (InReg) {
734  MVT RegVT = VA.getLocVT();
735  if (VA.getLocInfo() == CCValAssign::BCvt)
736  RegVT = VA.getValVT();
737 
738  const TargetRegisterClass *RC = getRegClassFor(RegVT);
739  unsigned VReg = MRI.createVirtualRegister(RC);
740  SDValue Copy = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
741 
742  // Treat values of type MVT::i1 specially: they are passed in
743  // registers of type i32, but they need to remain as values of
744  // type i1 for consistency of the argument lowering.
745  if (VA.getValVT() == MVT::i1) {
746  assert(RegVT.getSizeInBits() <= 32);
747  SDValue T = DAG.getNode(ISD::AND, dl, RegVT,
748  Copy, DAG.getConstant(1, dl, RegVT));
749  Copy = DAG.getSetCC(dl, MVT::i1, T, DAG.getConstant(0, dl, RegVT),
750  ISD::SETNE);
751  } else {
752 #ifndef NDEBUG
753  unsigned RegSize = RegVT.getSizeInBits();
754  assert(RegSize == 32 || RegSize == 64 ||
755  Subtarget.isHVXVectorType(RegVT));
756 #endif
757  }
758  InVals.push_back(Copy);
759  MRI.addLiveIn(VA.getLocReg(), VReg);
760  } else {
761  assert(VA.isMemLoc() && "Argument should be passed in memory");
762 
763  // If it's a byval parameter, then we need to compute the
764  // "real" size, not the size of the pointer.
765  unsigned ObjSize = Flags.isByVal()
766  ? Flags.getByValSize()
767  : VA.getLocVT().getStoreSizeInBits() / 8;
768 
769  // Create the frame index object for this incoming parameter.
771  int FI = MFI.CreateFixedObject(ObjSize, Offset, true);
772  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
773 
774  if (Flags.isByVal()) {
775  // If it's a pass-by-value aggregate, then do not dereference the stack
776  // location. Instead, we should generate a reference to the stack
777  // location.
778  InVals.push_back(FIN);
779  } else {
780  SDValue L = DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
782  InVals.push_back(L);
783  }
784  }
785  }
786 
787 
788  if (IsVarArg) {
789  // This will point to the next argument passed via stack.
790  int Offset = HEXAGON_LRFP_SIZE + CCInfo.getNextStackOffset();
791  int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
792  HMFI.setVarArgsFrameIndex(FI);
793  }
794 
795  return Chain;
796 }
797 
798 SDValue
800  // VASTART stores the address of the VarArgsFrameIndex slot into the
801  // memory location argument.
805  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
806  return DAG.getStore(Op.getOperand(0), SDLoc(Op), Addr, Op.getOperand(1),
807  MachinePointerInfo(SV));
808 }
809 
811  const SDLoc &dl(Op);
812  SDValue LHS = Op.getOperand(0);
813  SDValue RHS = Op.getOperand(1);
814  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
815  MVT ResTy = ty(Op);
816  MVT OpTy = ty(LHS);
817 
818  if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
819  MVT ElemTy = OpTy.getVectorElementType();
820  assert(ElemTy.isScalarInteger());
822  OpTy.getVectorNumElements());
823  return DAG.getSetCC(dl, ResTy,
824  DAG.getSExtOrTrunc(LHS, SDLoc(LHS), WideTy),
825  DAG.getSExtOrTrunc(RHS, SDLoc(RHS), WideTy), CC);
826  }
827 
828  // Treat all other vector types as legal.
829  if (ResTy.isVector())
830  return Op;
831 
832  // Comparisons of short integers should use sign-extend, not zero-extend,
833  // since we can represent small negative values in the compare instructions.
834  // The LLVM default is to use zero-extend arbitrarily in these cases.
835  auto isSExtFree = [this](SDValue N) {
836  switch (N.getOpcode()) {
837  case ISD::TRUNCATE: {
838  // A sign-extend of a truncate of a sign-extend is free.
839  SDValue Op = N.getOperand(0);
840  if (Op.getOpcode() != ISD::AssertSext)
841  return false;
842  EVT OrigTy = cast<VTSDNode>(Op.getOperand(1))->getVT();
843  unsigned ThisBW = ty(N).getSizeInBits();
844  unsigned OrigBW = OrigTy.getSizeInBits();
845  // The type that was sign-extended to get the AssertSext must be
846  // narrower than the type of N (so that N has still the same value
847  // as the original).
848  return ThisBW >= OrigBW;
849  }
850  case ISD::LOAD:
851  // We have sign-extended loads.
852  return true;
853  }
854  return false;
855  };
856 
857  if (OpTy == MVT::i8 || OpTy == MVT::i16) {
859  bool IsNegative = C && C->getAPIntValue().isNegative();
860  if (IsNegative || isSExtFree(LHS) || isSExtFree(RHS))
861  return DAG.getSetCC(dl, ResTy,
862  DAG.getSExtOrTrunc(LHS, SDLoc(LHS), MVT::i32),
863  DAG.getSExtOrTrunc(RHS, SDLoc(RHS), MVT::i32), CC);
864  }
865 
866  return SDValue();
867 }
868 
// Lower ISD::VSELECT. For v2i16 operands the selection is widened: both
// value operands are zero-extended to v2i32, the VSELECT is performed at
// v2i32, and the result is truncated back to v2i16. Every other type falls
// through (empty SDValue) to the default lowering.
// NOTE(review): the name line (scrape line 870) is missing here — presumably
// `HexagonTargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {`;
// confirm against the upstream LLVM 8.0.1 source.
869 SDValue
 871  SDValue PredOp = Op.getOperand(0);
 872  SDValue Op1 = Op.getOperand(1), Op2 = Op.getOperand(2);
 873  EVT OpVT = Op1.getValueType();
 874  SDLoc DL(Op);
 875 
 876  if (OpVT == MVT::v2i16) {
 877  SDValue X1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v2i32, Op1);
 878  SDValue X2 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v2i32, Op2);
 879  SDValue SL = DAG.getNode(ISD::VSELECT, DL, MVT::v2i32, PredOp, X1, X2);
 880  SDValue TR = DAG.getNode(ISD::TRUNCATE, DL, MVT::v2i16, SL);
 881  return TR;
 882  }
 883 
 884  return SDValue();
 885 }
886 
// Repack a constant vector of i1 elements into a constant vector of i8
// elements: each group of 8 consecutive i1 values becomes one byte, with
// element j of the group placed at bit position (7 - j), i.e. MSB-first.
// Returns nullptr when the input is not a ConstantVector. The asserts
// require a power-of-2 element count and that each produced byte is all-0s
// or all-1s.
887 static Constant *convert_i1_to_i8(const Constant *ConstVal) {
// NOTE(review): the declaration of `NewConst` (scrape line 888) is missing
// from this copy — presumably a SmallVector<Constant *, N>; confirm against
// the upstream LLVM 8.0.1 source.
 889  const ConstantVector *CV = dyn_cast<ConstantVector>(ConstVal);
 890  if (!CV)
 891  return nullptr;
 892 
 893  LLVMContext &Ctx = ConstVal->getContext();
 894  IRBuilder<> IRB(Ctx);
 895  unsigned NumVectorElements = CV->getNumOperands();
 896  assert(isPowerOf2_32(NumVectorElements) &&
 897  "conversion only supported for pow2 VectorSize!");
 898 
 899  for (unsigned i = 0; i < NumVectorElements / 8; ++i) {
 900  uint8_t x = 0;
 901  for (unsigned j = 0; j < 8; ++j) {
// getUniqueInteger() yields the i1 value (0 or 1) of each vector element.
 902  uint8_t y = CV->getOperand(i * 8 + j)->getUniqueInteger().getZExtValue();
 903  x |= y << (7 - j);
 904  }
 905  assert((x == 0 || x == 255) && "Either all 0's or all 1's expected!");
 906  NewConst.push_back(IRB.getInt8(x));
 907  }
 908  return ConstantVector::get(NewConst);
 909 }
910 
// Lower a constant-pool reference. Constant vectors of i1 are first
// rewritten as i8 vectors (convert_i1_to_i8) so they can live in the pool;
// the resulting target constant pool node is wrapped in AT_PCREL under PIC,
// or in the CP wrapper otherwise.
// NOTE(review): the signature line (HexagonTargetLowering::LowerConstantPool)
// was dropped by this extraction.
SDValue
  EVT ValTy = Op.getValueType();
  ConstantPoolSDNode *CPN = cast<ConstantPoolSDNode>(Op);
  Constant *CVal = nullptr;
  bool isVTi1Type = false;
  if (const Constant *ConstVal = dyn_cast<Constant>(CPN->getConstVal())) {
    Type *CValTy = ConstVal->getType();
    if (CValTy->isVectorTy() &&
        CValTy->getVectorElementType()->isIntegerTy(1)) {
      // isVTi1Type stays false if the conversion bailed (non-ConstantVector).
      CVal = convert_i1_to_i8(ConstVal);
      isVTi1Type = (CVal != nullptr);
    }
  }
  unsigned Align = CPN->getAlignment();
  bool IsPositionIndependent = isPositionIndependent();
  unsigned char TF = IsPositionIndependent ? HexagonII::MO_PCREL : 0;

  unsigned Offset = 0;
  SDValue T;
  if (CPN->isMachineConstantPoolEntry())
    T = DAG.getTargetConstantPool(CPN->getMachineCPVal(), ValTy, Align, Offset,
                                  TF);
  else if (isVTi1Type)
    T = DAG.getTargetConstantPool(CVal, ValTy, Align, Offset, TF);
  else
    T = DAG.getTargetConstantPool(CPN->getConstVal(), ValTy, Align, Offset, TF);

  assert(cast<ConstantPoolSDNode>(T)->getTargetFlags() == TF &&
         "Inconsistent target flag encountered");

  if (IsPositionIndependent)
    return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), ValTy, T);
  return DAG.getNode(HexagonISD::CP, SDLoc(Op), ValTy, T);
}
946 
// Lower a jump-table reference: PC-relative (AT_PCREL) under PIC, plain JT
// wrapper otherwise.
// NOTE(review): the signature line and, inside the PIC branch, the line
// defining T (a getTargetJumpTable call, presumably with a PC-relative
// target flag) were dropped by this extraction — confirm against the
// full file.
SDValue
  EVT VT = Op.getValueType();
  int Idx = cast<JumpTableSDNode>(Op)->getIndex();
  if (isPositionIndependent()) {
    return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), VT, T);
  }

  SDValue T = DAG.getTargetJumpTable(Idx, VT);
  return DAG.getNode(HexagonISD::JT, SDLoc(Op), VT, T);
}
959 
// Lower RETURNADDR. Depth 0 returns LR (marked as an implicit live-in);
// a non-zero depth loads the saved return address at frame-address + 4.
// NOTE(review): the signature line, the definition of MF
// (DAG.getMachineFunction()), and the trailing MachinePointerInfo argument
// of getLoad were dropped by this extraction.
SDValue
  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    // Return address of a parent frame: load it from the stack slot that
    // sits 4 bytes above the corresponding frame address.
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
    return DAG.getLoad(VT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
  }

  // Return LR, which contains the return address. Mark it an implicit live-in.
  unsigned Reg = MF.addLiveIn(HRI.getRARegister(), getRegClassFor(MVT::i32));
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
}
985 
// Lower FRAMEADDR: start at the frame register and chase one saved
// frame pointer per requested depth level.
// NOTE(review): the signature line, the definition of MFI, and the trailing
// MachinePointerInfo argument of getLoad were dropped by this extraction.
SDValue
  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                                         HRI.getFrameRegister(), VT);
  // Each iteration dereferences the saved FP of the next-outer frame.
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
  return FrameAddr;
}
1002 
// Lower an atomic fence to the Hexagon BARRIER node, chained on the
// incoming chain operand.
// NOTE(review): the signature line was dropped by this extraction.
SDValue
  SDLoc dl(Op);
  return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0));
}
1008 
// Lower a global address. Static relocation uses CONST32/CONST32_GP
// (small-data globals get the GP-relative form); PIC uses a PC-relative
// reference for DSO-local globals and a GOT access otherwise.
// NOTE(review): the signature line and the target-flag argument of the
// PC-relative getTargetGlobalAddress call were dropped by this extraction.
SDValue
  SDLoc dl(Op);
  auto *GAN = cast<GlobalAddressSDNode>(Op);
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  auto *GV = GAN->getGlobal();
  int64_t Offset = GAN->getOffset();

  auto &HLOF = *HTM.getObjFileLowering();
  Reloc::Model RM = HTM.getRelocationModel();

  if (RM == Reloc::Static) {
    SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset);
    const GlobalObject *GO = GV->getBaseObject();
    // Small-data globals are addressed GP-relative.
    if (GO && Subtarget.useSmallData() && HLOF.isGlobalInSmallSection(GO, HTM))
      return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, GA);
    return DAG.getNode(HexagonISD::CONST32, dl, PtrVT, GA);
  }

  bool UsePCRel = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
  if (UsePCRel) {
    SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset,
    return DAG.getNode(HexagonISD::AT_PCREL, dl, PtrVT, GA);
  }

  // Use GOT index. Note the addend is folded in separately (AT_GOT takes
  // GOT base, GOT-flagged symbol, and offset as distinct operands).
  SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
  SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, HexagonII::MO_GOT);
  SDValue Off = DAG.getConstant(Offset, dl, MVT::i32);
  return DAG.getNode(HexagonISD::AT_GOT, dl, PtrVT, GOT, GA, Off);
}
1041 
// NOTE(review): this comment appears misplaced — it describes load/store
// type promotion, not the block-address lowering that follows.
// Specifies that for loads and stores VT can be promoted to PromotedLdStVT.
// Lower a block address: CONST32_GP under static relocation, otherwise a
// PC-relative (MO_PCREL + AT_PCREL) reference.
// NOTE(review): the signature line was dropped by this extraction.
SDValue
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDLoc dl(Op);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  Reloc::Model RM = HTM.getRelocationModel();
  if (RM == Reloc::Static) {
    SDValue A = DAG.getTargetBlockAddress(BA, PtrVT);
    return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, A);
  }

  SDValue A = DAG.getTargetBlockAddress(BA, PtrVT, 0, HexagonII::MO_PCREL);
  return DAG.getNode(HexagonISD::AT_PCREL, dl, PtrVT, A);
}
1058 
// Materialize the GOT base pointer as a PC-relative reference.
// NOTE(review): the signature line and the line(s) creating GOTSym
// (presumably a target external symbol for the GOT) were dropped by this
// extraction — confirm against the full file.
SDValue
      const {
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), PtrVT, GOTSym);
}
1067 
// Emit the call to __tls_get_addr-style resolver for dynamic TLS: build a
// CALL node with the TLS symbol as callee and R0 live-in, then read the
// result back out of ReturnReg.
// NOTE(review): the first signature line (listing SelectionDAG &DAG and
// SDValue Chain parameters) was dropped by this extraction.
SDValue
    GlobalAddressSDNode *GA, SDValue Glue, EVT PtrVT, unsigned ReturnReg,
    unsigned char OperandFlags) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDLoc dl(GA);
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(),
                                           OperandFlags);
  // Create Operands for the call. The Operands should have the following:
  // 1. Chain SDValue
  // 2. Callee which in this case is the Global address value.
  // 3. Registers live into the call. In this case it is R0, as we
  //    have just one argument to be passed.
  // 4. Glue.
  // Note: The order is important.

  const auto &HRI = *Subtarget.getRegisterInfo();
  const uint32_t *Mask = HRI.getCallPreservedMask(MF, CallingConv::C);
  assert(Mask && "Missing call preserved mask for calling convention");
  SDValue Ops[] = { Chain, TGA, DAG.getRegister(Hexagon::R0, PtrVT),
                    DAG.getRegisterMask(Mask), Glue };
  Chain = DAG.getNode(HexagonISD::CALL, dl, NodeTys, Ops);

  // Inform MFI that function has calls.
  MFI.setAdjustsStack(true);

  Glue = Chain.getValue(1);
  return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Glue);
}
1101 
//
// Lower using the initial-exec model for TLS addresses
//
// Initial-exec TLS: the variable's offset from the thread pointer is loaded
// from a GOT-style slot (IE or IEGOT relocation, depending on PIC) and
// added to the thread pointer (UGP).
// NOTE(review): the signature line was dropped by this extraction.
SDValue
    SelectionDAG &DAG) const {
  SDLoc dl(GA);
  int64_t Offset = GA->getOffset();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // Get the thread pointer.
  SDValue TP = DAG.getCopyFromReg(DAG.getEntryNode(), dl, Hexagon::UGP, PtrVT);

  bool IsPositionIndependent = isPositionIndependent();
  unsigned char TF =
      IsPositionIndependent ? HexagonII::MO_IEGOT : HexagonII::MO_IE;

  // First generate the TLS symbol address
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT,
                                           Offset, TF);

  SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);

  if (IsPositionIndependent) {
    // Generate the GOT pointer in case of position independent code
    SDValue GOT = LowerGLOBAL_OFFSET_TABLE(Sym, DAG);

    // Add the TLS Symbol address to GOT pointer. This gives
    // GOT relative relocation for the symbol.
    Sym = DAG.getNode(ISD::ADD, dl, PtrVT, GOT, Sym);
  }

  // Load the offset value for TLS symbol. This offset is relative to
  // thread pointer.
  SDValue LoadOffset =
      DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Sym, MachinePointerInfo());

  // Address of the thread local variable is the add of thread
  // pointer and the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, TP, LoadOffset);
}
1143 
1144 //
1145 // Lower using the local executable model for TLS addresses
1146 //
// Local-exec TLS: the variable's offset is a link-time constant, so the
// address is simply thread-pointer (UGP) plus the TP-relative symbol.
// NOTE(review): the signature line and the target-flag argument of
// getTargetGlobalAddress (presumably a TP-relative flag) were dropped by
// this extraction.
SDValue
    SelectionDAG &DAG) const {
  SDLoc dl(GA);
  int64_t Offset = GA->getOffset();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // Get the thread pointer.
  SDValue TP = DAG.getCopyFromReg(DAG.getEntryNode(), dl, Hexagon::UGP, PtrVT);
  // Generate the TLS symbol address
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT, Offset,
  SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);

  // Address of the thread local variable is the add of thread
  // pointer and the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, TP, Sym);
}
1165 
1166 //
1167 // Lower using the general dynamic model for TLS addresses
1168 //
// General-dynamic TLS: compute the GOT entry for the symbol, pass it in R0,
// and call the dynamic TLS resolver via GetDynamicTLSAddr.
// NOTE(review): the signature line, the target-flag argument of
// getTargetGlobalAddress, and the branches of the Flags ternary (long-call
// vs. normal GD-PLT flags) were dropped by this extraction.
SDValue
    SelectionDAG &DAG) const {
  SDLoc dl(GA);
  int64_t Offset = GA->getOffset();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // First generate the TLS symbol address
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT, Offset,

  // Then, generate the GOT pointer
  SDValue GOT = LowerGLOBAL_OFFSET_TABLE(TGA, DAG);

  // Add the TLS symbol and the GOT pointer
  SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);
  SDValue Chain = DAG.getNode(ISD::ADD, dl, PtrVT, GOT, Sym);

  // Copy over the argument to R0
  SDValue InFlag;
  Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, Hexagon::R0, Chain, InFlag);
  InFlag = Chain.getValue(1);

  unsigned Flags =
      static_cast<const HexagonSubtarget &>(DAG.getSubtarget()).useLongCalls()

  return GetDynamicTLSAddr(DAG, Chain, GA, InFlag, PtrVT,
                           Hexagon::R0, Flags);
}
1200 
1201 //
1202 // Lower TLS addresses.
1203 //
1204 // For now for dynamic models, we only support the general dynamic model.
1205 //
// Dispatch TLS lowering on the TLS model selected by the target machine.
// NOTE(review): the signature line and the first case label(s) — the
// comment above says dynamic models map to the general-dynamic lowering,
// so presumably GeneralDynamic/LocalDynamic labels — were dropped by this
// extraction.
SDValue
    SelectionDAG &DAG) const {
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);

  switch (HTM.getTLSModel(GA->getGlobal())) {
      return LowerToTLSGeneralDynamicModel(GA, DAG);
    case TLSModel::InitialExec:
      return LowerToTLSInitialExecModel(GA, DAG);
    case TLSModel::LocalExec:
      return LowerToTLSLocalExecModel(GA, DAG);
  }
  llvm_unreachable("Bogus TLS model");
}
1222 
1223 //===----------------------------------------------------------------------===//
1224 // TargetLowering Implementation
1225 //===----------------------------------------------------------------------===//
1226 
// HexagonTargetLowering constructor: registers the legal register classes,
// installs per-operation legalization actions (legal / custom / expand /
// promote) for scalar and vector types, and sets Hexagon-specific runtime
// library call names.
// NOTE(review): this extraction dropped the leading signature line and a
// large number of setOperationAction / setLoadExtAction / setIndexedLoadAction
// calls; only the surviving lines are reproduced below — confirm details
// against the full file.
    const HexagonSubtarget &ST)
    : TargetLowering(TM), HTM(static_cast<const HexagonTargetMachine&>(TM)),
      Subtarget(ST) {
  auto &HRI = *Subtarget.getRegisterInfo();

  setStackPointerRegisterToSaveRestore(HRI.getStackRegister());


  else

  // Limits for inline expansion of memcpy/memmove

  //
  // Set up register classes.
  //

  // Predicate registers hold i1 and short bool vectors; 32-bit values and
  // sub-word vectors live in IntRegs; 64-bit values and wider vectors in
  // DoubleRegs.
  addRegisterClass(MVT::i1, &Hexagon::PredRegsRegClass);
  addRegisterClass(MVT::v2i1, &Hexagon::PredRegsRegClass);  // bbbbaaaa
  addRegisterClass(MVT::v4i1, &Hexagon::PredRegsRegClass);  // ddccbbaa
  addRegisterClass(MVT::v8i1, &Hexagon::PredRegsRegClass);  // hgfedcba
  addRegisterClass(MVT::i32, &Hexagon::IntRegsRegClass);
  addRegisterClass(MVT::v2i16, &Hexagon::IntRegsRegClass);
  addRegisterClass(MVT::v4i8, &Hexagon::IntRegsRegClass);
  addRegisterClass(MVT::i64, &Hexagon::DoubleRegsRegClass);
  addRegisterClass(MVT::v8i8, &Hexagon::DoubleRegsRegClass);
  addRegisterClass(MVT::v4i16, &Hexagon::DoubleRegsRegClass);
  addRegisterClass(MVT::v2i32, &Hexagon::DoubleRegsRegClass);

  addRegisterClass(MVT::f32, &Hexagon::IntRegsRegClass);
  addRegisterClass(MVT::f64, &Hexagon::DoubleRegsRegClass);

  //
  // Handling of scalar operations.
  //
  // All operations default to "legal", except:
  // - indexed loads and stores (pre-/post-incremented),
  // - ANY_EXTEND_VECTOR_INREG, ATOMIC_CMP_SWAP_WITH_SUCCESS, CONCAT_VECTORS,
  //   ConstantFP, DEBUGTRAP, FCEIL, FCOPYSIGN, FEXP, FEXP2, FFLOOR, FGETSIGN,
  //   FLOG, FLOG2, FLOG10, FMAXNUM, FMINNUM, FNEARBYINT, FRINT, FROUND, TRAP,
  //   FTRUNC, PREFETCH, SIGN_EXTEND_VECTOR_INREG, ZERO_EXTEND_VECTOR_INREG,
  // which default to "expand" for at least one type.

  // Misc operations.

  // Custom legalize GlobalAddress nodes into CONST32.

  // Hexagon needs to optimize cases with negative constants.

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.


  if (EmitJumpTables)
  else


  // Hexagon has A4_addp_c and A4_subp_c that take and generate a carry bit,
  // but they only operate on i64.
  for (MVT VT : MVT::integer_valuetypes()) {
  }


  // Popcount can count # of 1s in i64 but returns i32.



  for (unsigned IntExpOp :
    for (MVT VT : MVT::integer_valuetypes())
      setOperationAction(IntExpOp, VT, Expand);
  }

  for (unsigned FPExpOp :
    for (MVT VT : MVT::fp_valuetypes())
      setOperationAction(FPExpOp, VT, Expand);
  }

  // No extending loads from i32.
  for (MVT VT : MVT::integer_valuetypes()) {
  }
  // Turn FP truncstore into trunc + store.
  // Turn FP extload into load/fpextend.
  for (MVT VT : MVT::fp_valuetypes())

  // Expand BR_CC and SELECT_CC for all integer and fp types.
  for (MVT VT : MVT::integer_valuetypes()) {
  }
  for (MVT VT : MVT::fp_valuetypes()) {
  }

  //
  // Handling of vector operations.
  //

  // Set the action for vector operations to "expand", then override it with
  // either "custom" or "legal" for specific cases.
  static const unsigned VectExpOps[] = {
    // Integer arithmetic:
    // Logical/bit:
    // Floating point arithmetic/math functions:
    // Misc:
    // Vector:
  };

  for (MVT VT : MVT::vector_valuetypes()) {
    for (unsigned VectExpOp : VectExpOps)
      setOperationAction(VectExpOp, VT, Expand);

    // Expand all extending loads and truncating stores:
    for (MVT TargetVT : MVT::vector_valuetypes()) {
      if (TargetVT == VT)
        continue;
      setLoadExtAction(ISD::EXTLOAD, TargetVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, TargetVT, VT, Expand);
      setLoadExtAction(ISD::SEXTLOAD, TargetVT, VT, Expand);
      setTruncStoreAction(VT, TargetVT, Expand);
    }

    // Normalize all inputs to SELECT to be vectors of i32.
    if (VT.getVectorElementType() != MVT::i32) {
      MVT VT32 = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32);
      AddPromotedToType(ISD::SELECT, VT, VT32);
    }
  }

  // Extending loads from (native) vectors of i8 into (native) vectors of i16
  // are legal.

  // Types natively supported:
  for (MVT NativeVT : {MVT::v8i1, MVT::v4i1, MVT::v2i1, MVT::v4i8,

    setOperationAction(ISD::ADD, NativeVT, Legal);
    setOperationAction(ISD::SUB, NativeVT, Legal);
    setOperationAction(ISD::MUL, NativeVT, Legal);
    setOperationAction(ISD::AND, NativeVT, Legal);
    setOperationAction(ISD::OR, NativeVT, Legal);
    setOperationAction(ISD::XOR, NativeVT, Legal);
  }

  // Custom lower unaligned loads.
  // Also, for both loads and stores, verify the alignment of the address
  // in case it is a compile-time constant. This is a usability feature to
  // provide a meaningful error message to users.
  }

  for (MVT VT : {MVT::v2i16, MVT::v4i8, MVT::v2i32, MVT::v4i16, MVT::v2i32}) {
  }

  // Custom-lower bitcasts from i8 to v8i1.
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i8, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);

  // V5+.

  // Handling of indexed loads/stores: default is "expand".
  //
  }

  // Subtarget-specific operation actions.
  //
  if (Subtarget.hasV60Ops()) {
  }
  if (Subtarget.hasV66Ops()) {
  }

  if (Subtarget.useHVXOps())
    initializeHVXLowering();

  //
  // Library calls for unsupported operations
  //
  bool FastMath = EnableFastMath;

  setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3");
  setLibcallName(RTLIB::SDIV_I64, "__hexagon_divdi3");
  setLibcallName(RTLIB::UDIV_I32, "__hexagon_udivsi3");
  setLibcallName(RTLIB::UDIV_I64, "__hexagon_udivdi3");
  setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
  setLibcallName(RTLIB::SREM_I64, "__hexagon_moddi3");
  setLibcallName(RTLIB::UREM_I32, "__hexagon_umodsi3");
  setLibcallName(RTLIB::UREM_I64, "__hexagon_umoddi3");

  setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf");
  setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf");
  setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti");
  setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti");
  setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti");
  setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti");

  // This is the only fast library function for sqrtd.
  if (FastMath)
    setLibcallName(RTLIB::SQRT_F64, "__hexagon_fast2_sqrtdf2");

  // Prefix is: nothing for "slow-math",
  //            "fast2_" for V5+ fast-math double-precision
  // (actually, keep fast-math and fast-math2 separate for now)
  if (FastMath) {
    setLibcallName(RTLIB::ADD_F64, "__hexagon_fast_adddf3");
    setLibcallName(RTLIB::SUB_F64, "__hexagon_fast_subdf3");
    setLibcallName(RTLIB::MUL_F64, "__hexagon_fast_muldf3");
    setLibcallName(RTLIB::DIV_F64, "__hexagon_fast_divdf3");
    setLibcallName(RTLIB::DIV_F32, "__hexagon_fast_divsf3");
  } else {
    setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3");
    setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3");
    setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3");
    setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3");
    setLibcallName(RTLIB::DIV_F32, "__hexagon_divsf3");
  }

  if (FastMath)
    setLibcallName(RTLIB::SQRT_F32, "__hexagon_fast2_sqrtf");
  else
    setLibcallName(RTLIB::SQRT_F32, "__hexagon_sqrtf");

  // These cause problems when the shift amount is non-constant.
  setLibcallName(RTLIB::SHL_I128, nullptr);
  setLibcallName(RTLIB::SRL_I128, nullptr);
  setLibcallName(RTLIB::SRA_I128, nullptr);
}
1613 
// Return a printable name for each Hexagon-specific SelectionDAG node, for
// DAG dumps and debugging; nullptr for opcodes outside the HexagonISD range.
const char* HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
  // Casting to the enum (rather than switching on the raw unsigned) lets
  // the compiler warn about unhandled HexagonISD::NodeType values.
  switch ((HexagonISD::NodeType)Opcode) {
  case HexagonISD::ADDC:          return "HexagonISD::ADDC";
  case HexagonISD::SUBC:          return "HexagonISD::SUBC";
  case HexagonISD::ALLOCA:        return "HexagonISD::ALLOCA";
  case HexagonISD::AT_GOT:        return "HexagonISD::AT_GOT";
  case HexagonISD::AT_PCREL:      return "HexagonISD::AT_PCREL";
  case HexagonISD::BARRIER:       return "HexagonISD::BARRIER";
  case HexagonISD::CALL:          return "HexagonISD::CALL";
  case HexagonISD::CALLnr:        return "HexagonISD::CALLnr";
  case HexagonISD::CALLR:         return "HexagonISD::CALLR";
  case HexagonISD::COMBINE:       return "HexagonISD::COMBINE";
  case HexagonISD::CONST32_GP:    return "HexagonISD::CONST32_GP";
  case HexagonISD::CONST32:       return "HexagonISD::CONST32";
  case HexagonISD::CP:            return "HexagonISD::CP";
  case HexagonISD::DCFETCH:       return "HexagonISD::DCFETCH";
  case HexagonISD::EH_RETURN:     return "HexagonISD::EH_RETURN";
  case HexagonISD::TSTBIT:        return "HexagonISD::TSTBIT";
  case HexagonISD::EXTRACTU:      return "HexagonISD::EXTRACTU";
  case HexagonISD::INSERT:        return "HexagonISD::INSERT";
  case HexagonISD::JT:            return "HexagonISD::JT";
  case HexagonISD::RET_FLAG:      return "HexagonISD::RET_FLAG";
  case HexagonISD::TC_RETURN:     return "HexagonISD::TC_RETURN";
  case HexagonISD::VASL:          return "HexagonISD::VASL";
  case HexagonISD::VASR:          return "HexagonISD::VASR";
  case HexagonISD::VLSR:          return "HexagonISD::VLSR";
  case HexagonISD::VSPLAT:        return "HexagonISD::VSPLAT";
  case HexagonISD::VEXTRACTW:     return "HexagonISD::VEXTRACTW";
  case HexagonISD::VINSERTW0:     return "HexagonISD::VINSERTW0";
  case HexagonISD::VROR:          return "HexagonISD::VROR";
  case HexagonISD::READCYCLE:     return "HexagonISD::READCYCLE";
  case HexagonISD::VZERO:         return "HexagonISD::VZERO";
  case HexagonISD::VSPLATW:       return "HexagonISD::VSPLATW";
  case HexagonISD::D2P:           return "HexagonISD::D2P";
  case HexagonISD::P2D:           return "HexagonISD::P2D";
  case HexagonISD::V2Q:           return "HexagonISD::V2Q";
  case HexagonISD::Q2V:           return "HexagonISD::Q2V";
  case HexagonISD::QCAT:          return "HexagonISD::QCAT";
  case HexagonISD::QTRUE:         return "HexagonISD::QTRUE";
  case HexagonISD::QFALSE:        return "HexagonISD::QFALSE";
  case HexagonISD::TYPECAST:      return "HexagonISD::TYPECAST";
  case HexagonISD::VALIGN:        return "HexagonISD::VALIGN";
  case HexagonISD::VALIGNADDR:    return "HexagonISD::VALIGNADDR";
  case HexagonISD::OP_END:        break;
  }
  return nullptr;
}
1661 
1662 void
1663 HexagonTargetLowering::validateConstPtrAlignment(SDValue Ptr, const SDLoc &dl,
1664  unsigned NeedAlign) const {
1665  auto *CA = dyn_cast<ConstantSDNode>(Ptr);
1666  if (!CA)
1667  return;
1668  unsigned Addr = CA->getZExtValue();
1669  unsigned HaveAlign = Addr != 0 ? 1u << countTrailingZeros(Addr) : NeedAlign;
1670  if (HaveAlign < NeedAlign) {
1671  std::string ErrMsg;
1672  raw_string_ostream O(ErrMsg);
1673  O << "Misaligned constant address: " << format_hex(Addr, 10)
1674  << " has alignment " << HaveAlign
1675  << ", but the memory access requires " << NeedAlign;
1676  if (DebugLoc DL = dl.getDebugLoc())
1677  DL.print(O << ", at ");
1678  report_fatal_error(O.str());
1679  }
1680 }
1681 
// Bit-reverse Load Intrinsic: Check if the instruction is a bit reverse load
// intrinsic.
// NOTE(review): the remaining intrinsic IDs of this disjunction (the other
// L2_loadr*_pbr variants, by analogy with getTgtMemIntrinsic below) were
// dropped by this extraction — confirm against the full file.
static bool isBrevLdIntrinsic(const Value *Inst) {
  unsigned ID = cast<IntrinsicInst>(Inst)->getIntrinsicID();
  return (ID == Intrinsic::hexagon_L2_loadrd_pbr ||
}
1693 
// Bit-reverse Load Intrinsic: Crawl up and figure out the object from previous
// instruction. So far we only handle bitcast, extract value and bit reverse
// load intrinsic instructions. Should we handle CGEP ?
// NOTE(review): the signature line (a static function taking and returning
// Value*) was dropped by this extraction.
  if (Operator::getOpcode(V) == Instruction::ExtractValue ||
      Operator::getOpcode(V) == Instruction::BitCast)
    V = cast<Operator>(V)->getOperand(0);
  else if (isa<IntrinsicInst>(V) && isBrevLdIntrinsic(V))
    V = cast<Instruction>(V)->getOperand(0);
  // Anything else is returned unchanged (fixed point for the callers' loops).
  return V;
}
1705 
1706 // Bit-reverse Load Intrinsic: For a PHI Node return either an incoming edge or
1707 // a back edge. If the back edge comes from the intrinsic itself, the incoming
1708 // edge is returned.
1709 static Value *returnEdge(const PHINode *PN, Value *IntrBaseVal) {
1710  const BasicBlock *Parent = PN->getParent();
1711  int Idx = -1;
1712  for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i) {
1713  BasicBlock *Blk = PN->getIncomingBlock(i);
1714  // Determine if the back edge is originated from intrinsic.
1715  if (Blk == Parent) {
1716  Value *BackEdgeVal = PN->getIncomingValue(i);
1717  Value *BaseVal;
1718  // Loop over till we return the same Value or we hit the IntrBaseVal.
1719  do {
1720  BaseVal = BackEdgeVal;
1721  BackEdgeVal = getBrevLdObject(BackEdgeVal);
1722  } while ((BaseVal != BackEdgeVal) && (IntrBaseVal != BackEdgeVal));
1723  // If the getBrevLdObject returns IntrBaseVal, we should return the
1724  // incoming edge.
1725  if (IntrBaseVal == BackEdgeVal)
1726  continue;
1727  Idx = i;
1728  break;
1729  } else // Set the node to incoming edge.
1730  Idx = i;
1731  }
1732  assert(Idx >= 0 && "Unexpected index to incoming argument in PHI");
1733  return PN->getIncomingValue(Idx);
1734 }
1735 
// Bit-reverse Load Intrinsic: Figure out the underlying object the base
// pointer points to, for the bit-reverse load intrinsic. Setting this to
// memoperand might help alias analysis to figure out the dependencies.
// NOTE(review): the signature line (a static function taking Value *V and
// returning Value*) was dropped by this extraction.
  Value *IntrBaseVal = V;
  Value *BaseVal;
  // Loop over till we return the same Value, implies we either figure out
  // the object or we hit a PHI
  do {
    BaseVal = V;
    V = getBrevLdObject(V);
  } while (BaseVal != V);

  // Identify the object from PHINode.
  if (const PHINode *PN = dyn_cast<PHINode>(V))
    return returnEdge(PN, IntrBaseVal);
  // For non PHI nodes, the object is the last value returned by getBrevLdObject
  else
    return V;
}
1756 
/// Given an intrinsic, checks if on the target the intrinsic will need to map
/// to a MemIntrinsicNode (touches memory). If this is the case, it returns
/// true and store the intrinsic information into the IntrinsicInfo that was
/// passed to the function.
// NOTE(review): this extraction dropped the leading signature line, the case
// labels of both switch arms (the first arm handles the bit-reverse load
// intrinsics given the getUnderLyingObjectForBrevLdIntr call, the second an
// HVX masked/store-like family given Info.ptrVal/arg usage), the ElTy
// derivation, and the Info.flags assignments — confirm against the full file.
    const CallInst &I,
    MachineFunction &MF,
    unsigned Intrinsic) const {
  switch (Intrinsic) {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    auto &Cont = I.getCalledFunction()->getParent()->getContext();
    // The intrinsic function call is of the form { ElTy, i8* }
    // @llvm.hexagon.L2.loadXX.pbr(i8*, i32). The pointer and memory access type
    // should be derived from ElTy.
    Info.memVT = MVT::getVT(ElTy);
    llvm::Value *BasePtrVal = I.getOperand(0);
    Info.ptrVal = getUnderLyingObjectForBrevLdIntr(BasePtrVal);
    // The offset value comes through Modifier register. For now, assume the
    // offset is 0.
    Info.offset = 0;
    Info.align = DL.getABITypeAlignment(Info.memVT.getTypeForEVT(Cont));
    return true;
  }
    const Module &M = *I.getParent()->getParent()->getParent();
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Type *VecTy = I.getArgOperand(1)->getType();
    Info.memVT = MVT::getVT(VecTy);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = M.getDataLayout().getTypeAllocSizeInBits(VecTy) / 8;
    return true;
  }
  default:
    break;
  }
  return false;
}
1818 
// Type* overload of isTruncateFree — delegates to the EVT-based overload.
// NOTE(review): the signature line (bool HexagonTargetLowering::
// isTruncateFree(Type *Ty1, Type *Ty2) const, by the argument names) was
// dropped by this extraction.
  return isTruncateFree(EVT::getEVT(Ty1), EVT::getEVT(Ty2));
}
1822 
// Truncation is free only for i64 -> i32 (a register-pair's low half).
// NOTE(review): the signature line (taking EVT VT1, EVT VT2) was dropped by
// this extraction.
  if (!VT1.isSimple() || !VT2.isSimple())
    return false;
  return VT1.getSimpleVT() == MVT::i64 && VT2.getSimpleVT() == MVT::i32;
}
1828 
// Fused multiply-add is preferred whenever FMA is legal or custom for VT.
// NOTE(review): the signature line was dropped by this extraction.
  return isOperationLegalOrCustom(ISD::FMA, VT);
}
1832 
// Should we expand the build vector with shuffles?
// Hexagon never prefers shuffle-based expansion of BUILD_VECTOR.
// NOTE(review): the first signature line was dropped by this extraction.
    unsigned DefinedValues) const {
  return false;
}
1838 
// Unconditionally-true EVT predicate.
// NOTE(review): the first signature line was dropped by this extraction;
// given the trailing "EVT VT)" parameter and its position in the file, this
// is presumably isShuffleMaskLegal(ArrayRef<int>, EVT) — confirm against
// the full file.
    EVT VT) const {
  return true;
}
1843 
// Choose the type-legalization action for an illegal vector type.
// NOTE(review): the signature lines and the concrete return statements
// (the legalize-action values for the one-element, i1, and HVX-widening
// cases, plus the final default) were dropped by this extraction.
  if (VT.getVectorNumElements() == 1)

  // Always widen vectors of i1.
  MVT ElemTy = VT.getVectorElementType();
  if (ElemTy == MVT::i1)

  if (Subtarget.useHVXOps()) {
    // If the size of VT is at least half of the vector length,
    // widen the vector. Note: the threshold was not selected in
    // any scientific way.
    ArrayRef<MVT> Tys = Subtarget.getHVXElementTypes();
    if (llvm::find(Tys, ElemTy) != Tys.end()) {
      unsigned HwWidth = 8*Subtarget.getVectorLength();
      unsigned VecWidth = VT.getSizeInBits();
      if (VecWidth >= HwWidth/2 && VecWidth < HwWidth)
    }
  }
}
1868 
1869 std::pair<SDValue, int>
1870 HexagonTargetLowering::getBaseAndOffset(SDValue Addr) const {
1871  if (Addr.getOpcode() == ISD::ADD) {
1872  SDValue Op1 = Addr.getOperand(1);
1873  if (auto *CN = dyn_cast<const ConstantSDNode>(Op1.getNode()))
1874  return { Addr.getOperand(0), CN->getSExtValue() };
1875  }
1876  return { Addr, 0 };
1877 }
1878 
// Lower a vector shuffle (V1, V2, V3). V1 and V2 are the two vectors
// to select data from, V3 is the permutation.
// The mask is rewritten byte-wise and matched against the fixed patterns
// implemented by Hexagon's permute instructions (bswap, vtrun*, shuff*,
// packhl); unmatched masks fall back to the default expansion.
// NOTE(review): this extraction dropped the signature line and, inside the
// mask-normalization branch, the statement that commutes the mask to match
// the operand swap — confirm against the full file.
SDValue
      const {
  const auto *SVN = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> AM = SVN->getMask();
  assert(AM.size() <= 8 && "Unexpected shuffle mask");
  unsigned VecLen = AM.size();

  MVT VecTy = ty(Op);
  assert(!Subtarget.isHVXVectorType(VecTy, true) &&
         "HVX shuffles should be legal");
  assert(VecTy.getSizeInBits() <= 64 && "Unexpected vector length");

  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  const SDLoc &dl(Op);

  // If the inputs are not the same as the output, bail. This is not an
  // error situation, but complicates the handling and the default expansion
  // (into BUILD_VECTOR) should be adequate.
  if (ty(Op0) != VecTy || ty(Op1) != VecTy)
    return SDValue();

  // Normalize the mask so that the first non-negative index comes from
  // the first operand.
  SmallVector<int,8> Mask(AM.begin(), AM.end());
  unsigned F = llvm::find_if(AM, [](int M) { return M >= 0; }) - AM.data();
  if (F == AM.size())
    return DAG.getUNDEF(VecTy);
  if (AM[F] >= int(VecLen)) {
    std::swap(Op0, Op1);
  }

  // Express the shuffle mask in terms of bytes.
  SmallVector<int,8> ByteMask;
  unsigned ElemBytes = VecTy.getVectorElementType().getSizeInBits() / 8;
  for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
    int M = Mask[i];
    if (M < 0) {
      for (unsigned j = 0; j != ElemBytes; ++j)
        ByteMask.push_back(-1);
    } else {
      for (unsigned j = 0; j != ElemBytes; ++j)
        ByteMask.push_back(M*ElemBytes + j);
    }
  }
  assert(ByteMask.size() <= 8);

  // All non-undef (non-negative) indexes are well within [0..127], so they
  // fit in a single byte. Build two 64-bit words:
  // - MaskIdx where each byte is the corresponding index (for non-negative
  //   indexes), and 0xFF for negative indexes, and
  // - MaskUnd that has 0xFF for each negative index.
  uint64_t MaskIdx = 0;
  uint64_t MaskUnd = 0;
  for (unsigned i = 0, e = ByteMask.size(); i != e; ++i) {
    unsigned S = 8*i;
    uint64_t M = ByteMask[i] & 0xFF;
    if (M == 0xFF)
      MaskUnd |= M << S;
    MaskIdx |= M << S;
  }

  // The comparisons below OR MaskUnd into the expected pattern, so undef
  // byte positions match any expected index.
  if (ByteMask.size() == 4) {
    // Identity.
    if (MaskIdx == (0x03020100 | MaskUnd))
      return Op0;
    // Byte swap.
    if (MaskIdx == (0x00010203 | MaskUnd)) {
      SDValue T0 = DAG.getBitcast(MVT::i32, Op0);
      SDValue T1 = DAG.getNode(ISD::BSWAP, dl, MVT::i32, T0);
      return DAG.getBitcast(VecTy, T1);
    }

    // Byte packs.
    SDValue Concat10 = DAG.getNode(HexagonISD::COMBINE, dl,
                                   typeJoin({ty(Op1), ty(Op0)}), {Op1, Op0});
    if (MaskIdx == (0x06040200 | MaskUnd))
      return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat10}, DAG);
    if (MaskIdx == (0x07050301 | MaskUnd))
      return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat10}, DAG);

    SDValue Concat01 = DAG.getNode(HexagonISD::COMBINE, dl,
                                   typeJoin({ty(Op0), ty(Op1)}), {Op0, Op1});
    if (MaskIdx == (0x02000604 | MaskUnd))
      return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat01}, DAG);
    if (MaskIdx == (0x03010705 | MaskUnd))
      return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat01}, DAG);
  }

  if (ByteMask.size() == 8) {
    // Identity.
    if (MaskIdx == (0x0706050403020100ull | MaskUnd))
      return Op0;
    // Byte swap.
    if (MaskIdx == (0x0001020304050607ull | MaskUnd)) {
      SDValue T0 = DAG.getBitcast(MVT::i64, Op0);
      SDValue T1 = DAG.getNode(ISD::BSWAP, dl, MVT::i64, T0);
      return DAG.getBitcast(VecTy, T1);
    }

    // Halfword picks.
    if (MaskIdx == (0x0d0c050409080100ull | MaskUnd))
      return getInstr(Hexagon::S2_shuffeh, dl, VecTy, {Op1, Op0}, DAG);
    if (MaskIdx == (0x0f0e07060b0a0302ull | MaskUnd))
      return getInstr(Hexagon::S2_shuffoh, dl, VecTy, {Op1, Op0}, DAG);
    if (MaskIdx == (0x0d0c090805040100ull | MaskUnd))
      return getInstr(Hexagon::S2_vtrunewh, dl, VecTy, {Op1, Op0}, DAG);
    if (MaskIdx == (0x0f0e0b0a07060302ull | MaskUnd))
      return getInstr(Hexagon::S2_vtrunowh, dl, VecTy, {Op1, Op0}, DAG);
    if (MaskIdx == (0x0706030205040100ull | MaskUnd)) {
      VectorPair P = opSplit(Op0, dl, DAG);
      return getInstr(Hexagon::S2_packhl, dl, VecTy, {P.second, P.first}, DAG);
    }

    // Byte packs.
    if (MaskIdx == (0x0e060c040a020800ull | MaskUnd))
      return getInstr(Hexagon::S2_shuffeb, dl, VecTy, {Op1, Op0}, DAG);
    if (MaskIdx == (0x0f070d050b030901ull | MaskUnd))
      return getInstr(Hexagon::S2_shuffob, dl, VecTy, {Op1, Op0}, DAG);
  }

  return SDValue();
}
2006 
2007 // Create a Hexagon-specific node for shifting a vector by an integer.
2008 SDValue
2009 HexagonTargetLowering::getVectorShiftByInt(SDValue Op, SelectionDAG &DAG)
2010  const {
2011  if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode())) {
2012  if (SDValue S = BVN->getSplatValue()) {
2013  unsigned NewOpc;
2014  switch (Op.getOpcode()) {
2015  case ISD::SHL:
2016  NewOpc = HexagonISD::VASL;
2017  break;
2018  case ISD::SRA:
2019  NewOpc = HexagonISD::VASR;
2020  break;
2021  case ISD::SRL:
2022  NewOpc = HexagonISD::VLSR;
2023  break;
2024  default:
2025  llvm_unreachable("Unexpected shift opcode");
2026  }
2027  return DAG.getNode(NewOpc, SDLoc(Op), ty(Op), Op.getOperand(0), S);
2028  }
2029  }
2030 
2031  return SDValue();
2032 }
2033 
2034 SDValue
2036  return getVectorShiftByInt(Op, DAG);
2037 }
2038 
2039 SDValue
2041  if (isa<ConstantSDNode>(Op.getOperand(1).getNode()))
2042  return Op;
2043  return SDValue();
2044 }
2045 
2046 SDValue
2048  MVT ResTy = ty(Op);
2049  SDValue InpV = Op.getOperand(0);
2050  MVT InpTy = ty(InpV);
2051  assert(ResTy.getSizeInBits() == InpTy.getSizeInBits());
2052  const SDLoc &dl(Op);
2053 
2054  // Handle conversion from i8 to v8i1.
2055  if (ResTy == MVT::v8i1) {
2056  SDValue Sc = DAG.getBitcast(tyScalar(InpTy), InpV);
2057  SDValue Ext = DAG.getZExtOrTrunc(Sc, dl, MVT::i32);
2058  return getInstr(Hexagon::C2_tfrrp, dl, ResTy, Ext, DAG);
2059  }
2060 
2061  return SDValue();
2062 }
2063 
2064 bool
2065 HexagonTargetLowering::getBuildVectorConstInts(ArrayRef<SDValue> Values,
2066  MVT VecTy, SelectionDAG &DAG,
2067  MutableArrayRef<ConstantInt*> Consts) const {
2068  MVT ElemTy = VecTy.getVectorElementType();
2069  unsigned ElemWidth = ElemTy.getSizeInBits();
2070  IntegerType *IntTy = IntegerType::get(*DAG.getContext(), ElemWidth);
2071  bool AllConst = true;
2072 
2073  for (unsigned i = 0, e = Values.size(); i != e; ++i) {
2074  SDValue V = Values[i];
2075  if (V.isUndef()) {
2076  Consts[i] = ConstantInt::get(IntTy, 0);
2077  continue;
2078  }
2079  // Make sure to always cast to IntTy.
2080  if (auto *CN = dyn_cast<ConstantSDNode>(V.getNode())) {
2081  const ConstantInt *CI = CN->getConstantIntValue();
2082  Consts[i] = ConstantInt::get(IntTy, CI->getValue().getSExtValue());
2083  } else if (auto *CN = dyn_cast<ConstantFPSDNode>(V.getNode())) {
2084  const ConstantFP *CF = CN->getConstantFPValue();
2085  APInt A = CF->getValueAPF().bitcastToAPInt();
2086  Consts[i] = ConstantInt::get(IntTy, A.getZExtValue());
2087  } else {
2088  AllConst = false;
2089  }
2090  }
2091  return AllConst;
2092 }
2093 
2094 SDValue
2095 HexagonTargetLowering::buildVector32(ArrayRef<SDValue> Elem, const SDLoc &dl,
2096  MVT VecTy, SelectionDAG &DAG) const {
2097  MVT ElemTy = VecTy.getVectorElementType();
2098  assert(VecTy.getVectorNumElements() == Elem.size());
2099 
2100  SmallVector<ConstantInt*,4> Consts(Elem.size());
2101  bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts);
2102 
2103  unsigned First, Num = Elem.size();
2104  for (First = 0; First != Num; ++First)
2105  if (!isUndef(Elem[First]))
2106  break;
2107  if (First == Num)
2108  return DAG.getUNDEF(VecTy);
2109 
2110  if (AllConst &&
2111  llvm::all_of(Consts, [](ConstantInt *CI) { return CI->isZero(); }))
2112  return getZero(dl, VecTy, DAG);
2113 
2114  if (ElemTy == MVT::i16) {
2115  assert(Elem.size() == 2);
2116  if (AllConst) {
2117  uint32_t V = (Consts[0]->getZExtValue() & 0xFFFF) |
2118  Consts[1]->getZExtValue() << 16;
2119  return DAG.getBitcast(MVT::v2i16, DAG.getConstant(V, dl, MVT::i32));
2120  }
2121  SDValue N = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32,
2122  {Elem[1], Elem[0]}, DAG);
2123  return DAG.getBitcast(MVT::v2i16, N);
2124  }
2125 
2126  if (ElemTy == MVT::i8) {
2127  // First try generating a constant.
2128  if (AllConst) {
2129  int32_t V = (Consts[0]->getZExtValue() & 0xFF) |
2130  (Consts[1]->getZExtValue() & 0xFF) << 8 |
2131  (Consts[1]->getZExtValue() & 0xFF) << 16 |
2132  Consts[2]->getZExtValue() << 24;
2133  return DAG.getBitcast(MVT::v4i8, DAG.getConstant(V, dl, MVT::i32));
2134  }
2135 
2136  // Then try splat.
2137  bool IsSplat = true;
2138  for (unsigned i = 0; i != Num; ++i) {
2139  if (i == First)
2140  continue;
2141  if (Elem[i] == Elem[First] || isUndef(Elem[i]))
2142  continue;
2143  IsSplat = false;
2144  break;
2145  }
2146  if (IsSplat) {
2147  // Legalize the operand to VSPLAT.
2148  SDValue Ext = DAG.getZExtOrTrunc(Elem[First], dl, MVT::i32);
2149  return DAG.getNode(HexagonISD::VSPLAT, dl, VecTy, Ext);
2150  }
2151 
2152  // Generate
2153  // (zxtb(Elem[0]) | (zxtb(Elem[1]) << 8)) |
2154  // (zxtb(Elem[2]) | (zxtb(Elem[3]) << 8)) << 16
2155  assert(Elem.size() == 4);
2156  SDValue Vs[4];
2157  for (unsigned i = 0; i != 4; ++i) {
2158  Vs[i] = DAG.getZExtOrTrunc(Elem[i], dl, MVT::i32);
2159  Vs[i] = DAG.getZeroExtendInReg(Vs[i], dl, MVT::i8);
2160  }
2161  SDValue S8 = DAG.getConstant(8, dl, MVT::i32);
2162  SDValue T0 = DAG.getNode(ISD::SHL, dl, MVT::i32, {Vs[1], S8});
2163  SDValue T1 = DAG.getNode(ISD::SHL, dl, MVT::i32, {Vs[3], S8});
2164  SDValue B0 = DAG.getNode(ISD::OR, dl, MVT::i32, {Vs[0], T0});
2165  SDValue B1 = DAG.getNode(ISD::OR, dl, MVT::i32, {Vs[2], T1});
2166 
2167  SDValue R = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32, {B1, B0}, DAG);
2168  return DAG.getBitcast(MVT::v4i8, R);
2169  }
2170 
2171 #ifndef NDEBUG
2172  dbgs() << "VecTy: " << EVT(VecTy).getEVTString() << '\n';
2173 #endif
2174  llvm_unreachable("Unexpected vector element type");
2175 }
2176 
// Build a 64-bit vector from the given elements: as a splat, as a single
// i64 immediate when all elements are constant, or by building two 32-bit
// halves with buildVector32 and combining them.
SDValue
HexagonTargetLowering::buildVector64(ArrayRef<SDValue> Elem, const SDLoc &dl,
                                     MVT VecTy, SelectionDAG &DAG) const {
  MVT ElemTy = VecTy.getVectorElementType();
  assert(VecTy.getVectorNumElements() == Elem.size());

  // Collect the constant element values (undefs read as 0); AllConst is
  // true only if every element is constant or undef.
  SmallVector<ConstantInt*,8> Consts(Elem.size());
  bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts);

  // Find the first non-undef element; all-undef builds an UNDEF node.
  unsigned First, Num = Elem.size();
  for (First = 0; First != Num; ++First)
    if (!isUndef(Elem[First]))
      break;
  if (First == Num)
    return DAG.getUNDEF(VecTy);

  if (AllConst &&
      llvm::all_of(Consts, [](ConstantInt *CI) { return CI->isZero(); }))
    return getZero(dl, VecTy, DAG);

  // First try splat if possible.
  if (ElemTy == MVT::i16) {
    // Splat means every defined element equals Elem[First].
    bool IsSplat = true;
    for (unsigned i = 0; i != Num; ++i) {
      if (i == First)
        continue;
      if (Elem[i] == Elem[First] || isUndef(Elem[i]))
        continue;
      IsSplat = false;
      break;
    }
    if (IsSplat) {
      // Legalize the operand to VSPLAT.
      SDValue Ext = DAG.getZExtOrTrunc(Elem[First], dl, MVT::i32);
      return DAG.getNode(HexagonISD::VSPLAT, dl, VecTy, Ext);
    }
  }

  // Then try constant.
  if (AllConst) {
    // Pack the element constants into one i64, element 0 in the low bits.
    uint64_t Val = 0;
    unsigned W = ElemTy.getSizeInBits();
    uint64_t Mask = (ElemTy == MVT::i8) ? 0xFFull
                  : (ElemTy == MVT::i16) ? 0xFFFFull : 0xFFFFFFFFull;
    for (unsigned i = 0; i != Num; ++i)
      Val = (Val << W) | (Consts[Num-1-i]->getZExtValue() & Mask);
    SDValue V0 = DAG.getConstant(Val, dl, MVT::i64);
    return DAG.getBitcast(VecTy, V0);
  }

  // Build two 32-bit vectors and concatenate.
  MVT HalfTy = MVT::getVectorVT(ElemTy, Num/2);
  SDValue L = (ElemTy == MVT::i32)
                ? Elem[0]
                : buildVector32(Elem.take_front(Num/2), dl, HalfTy, DAG);
  SDValue H = (ElemTy == MVT::i32)
                ? Elem[1]
                : buildVector32(Elem.drop_front(Num/2), dl, HalfTy, DAG);
  // COMBINE takes (high, low).
  return DAG.getNode(HexagonISD::COMBINE, dl, VecTy, {H, L});
}
2237 
// Extract a scalar element or a subvector (of type ResTy, with element
// count implied by ValTy) from VecV at index IdxV. Handles both the
// boolean-vector (predicate register) case and the 32/64-bit integer
// vector case.
SDValue
HexagonTargetLowering::extractVector(SDValue VecV, SDValue IdxV,
                                     const SDLoc &dl, MVT ValTy, MVT ResTy,
                                     SelectionDAG &DAG) const {
  MVT VecTy = ty(VecV);
  assert(!ValTy.isVector() ||
         VecTy.getVectorElementType() == ValTy.getVectorElementType());
  unsigned VecWidth = VecTy.getSizeInBits();
  unsigned ValWidth = ValTy.getSizeInBits();
  unsigned ElemWidth = VecTy.getVectorElementType().getSizeInBits();
  assert((VecWidth % ElemWidth) == 0);
  // Non-null when the index is a compile-time constant.
  auto *IdxN = dyn_cast<ConstantSDNode>(IdxV);

  // Special case for v{8,4,2}i1 (the only boolean vectors legal in Hexagon
  // without any coprocessors).
  if (ElemWidth == 1) {
    assert(VecWidth == VecTy.getVectorNumElements() && "Sanity failure");
    assert(VecWidth == 8 || VecWidth == 4 || VecWidth == 2);
    // Check if this is an extract of the lowest bit.
    if (IdxN) {
      // Extracting the lowest bit is a no-op, but it changes the type,
      // so it must be kept as an operation to avoid errors related to
      // type mismatches.
      if (IdxN->isNullValue() && ValTy.getSizeInBits() == 1)
        return DAG.getNode(HexagonISD::TYPECAST, dl, MVT::i1, VecV);
    }

    // If the value extracted is a single bit, use tstbit.
    if (ValWidth == 1) {
      // Move the predicate into a general register, then test the bit at
      // position Idx * (8 / VecWidth).
      SDValue A0 = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {VecV}, DAG);
      SDValue M0 = DAG.getConstant(8 / VecWidth, dl, MVT::i32);
      SDValue I0 = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, M0);
      return DAG.getNode(HexagonISD::TSTBIT, dl, MVT::i1, A0, I0);
    }

    // Each bool vector (v2i1, v4i1, v8i1) always occupies 8 bits in
    // a predicate register. The elements of the vector are repeated
    // in the register (if necessary) so that the total number is 8.
    // The extracted subvector will need to be expanded in such a way.
    unsigned Scale = VecWidth / ValWidth;

    // Generate (p2d VecV) >> 8*Idx to move the interesting bytes to
    // position 0.
    assert(ty(IdxV) == MVT::i32);
    unsigned VecRep = 8 / VecWidth;
    SDValue S0 = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV,
                             DAG.getConstant(8*VecRep, dl, MVT::i32));
    SDValue T0 = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, VecV);
    SDValue T1 = DAG.getNode(ISD::SRL, dl, MVT::i64, T0, S0);
    // Widen the extracted piece back to the full 8-element representation,
    // halving Scale each round.
    while (Scale > 1) {
      // The longest possible subvector is at most 32 bits, so it is always
      // contained in the low subregister.
      T1 = DAG.getTargetExtractSubreg(Hexagon::isub_lo, dl, MVT::i32, T1);
      T1 = expandPredicate(T1, dl, DAG);
      Scale /= 2;
    }

    return DAG.getNode(HexagonISD::D2P, dl, ResTy, T1);
  }

  assert(VecWidth == 32 || VecWidth == 64);

  // Cast everything to scalar integer types.
  MVT ScalarTy = tyScalar(VecTy);
  VecV = DAG.getBitcast(ScalarTy, VecV);

  SDValue WidthV = DAG.getConstant(ValWidth, dl, MVT::i32);
  SDValue ExtV;

  if (IdxN) {
    // Constant index: use a subregister copy or an in-register zero-extend
    // when the bit offset lines up, otherwise a target EXTRACTU.
    unsigned Off = IdxN->getZExtValue() * ElemWidth;
    if (VecWidth == 64 && ValWidth == 32) {
      assert(Off == 0 || Off == 32);
      unsigned SubIdx = Off == 0 ? Hexagon::isub_lo : Hexagon::isub_hi;
      ExtV = DAG.getTargetExtractSubreg(SubIdx, dl, MVT::i32, VecV);
    } else if (Off == 0 && (ValWidth % 8) == 0) {
      ExtV = DAG.getZeroExtendInReg(VecV, dl, tyScalar(ValTy));
    } else {
      SDValue OffV = DAG.getConstant(Off, dl, MVT::i32);
      // The return type of EXTRACTU must be the same as the type of the
      // input vector.
      ExtV = DAG.getNode(HexagonISD::EXTRACTU, dl, ScalarTy,
                         {VecV, WidthV, OffV});
    }
  } else {
    // Variable index: compute the bit offset as Idx * ElemWidth.
    if (ty(IdxV) != MVT::i32)
      IdxV = DAG.getZExtOrTrunc(IdxV, dl, MVT::i32);
    SDValue OffV = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV,
                               DAG.getConstant(ElemWidth, dl, MVT::i32));
    ExtV = DAG.getNode(HexagonISD::EXTRACTU, dl, ScalarTy,
                       {VecV, WidthV, OffV});
  }

  // Cast ExtV to the requested result type.
  ExtV = DAG.getZExtOrTrunc(ExtV, dl, tyScalar(ResTy));
  ExtV = DAG.getBitcast(ResTy, ExtV);
  return ExtV;
}
2336 
2337 SDValue
2338 HexagonTargetLowering::insertVector(SDValue VecV, SDValue ValV, SDValue IdxV,
2339  const SDLoc &dl, MVT ValTy,
2340  SelectionDAG &DAG) const {
2341  MVT VecTy = ty(VecV);
2342  if (VecTy.getVectorElementType() == MVT::i1) {
2343  MVT ValTy = ty(ValV);
2344  assert(ValTy.getVectorElementType() == MVT::i1);
2345  SDValue ValR = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, ValV);
2346  unsigned VecLen = VecTy.getVectorNumElements();
2347  unsigned Scale = VecLen / ValTy.getVectorNumElements();
2348  assert(Scale > 1);
2349 
2350  for (unsigned R = Scale; R > 1; R /= 2) {
2351  ValR = contractPredicate(ValR, dl, DAG);
2352  ValR = DAG.getNode(HexagonISD::COMBINE, dl, MVT::i64,
2353  DAG.getUNDEF(MVT::i32), ValR);
2354  }
2355  // The longest possible subvector is at most 32 bits, so it is always
2356  // contained in the low subregister.
2357  ValR = DAG.getTargetExtractSubreg(Hexagon::isub_lo, dl, MVT::i32, ValR);
2358 
2359  unsigned ValBytes = 64 / Scale;
2360  SDValue Width = DAG.getConstant(ValBytes*8, dl, MVT::i32);
2361  SDValue Idx = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV,
2362  DAG.getConstant(8, dl, MVT::i32));
2363  SDValue VecR = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, VecV);
2365  {VecR, ValR, Width, Idx});
2366  return DAG.getNode(HexagonISD::D2P, dl, VecTy, Ins);
2367  }
2368 
2369  unsigned VecWidth = VecTy.getSizeInBits();
2370  unsigned ValWidth = ValTy.getSizeInBits();
2371  assert(VecWidth == 32 || VecWidth == 64);
2372  assert((VecWidth % ValWidth) == 0);
2373 
2374  // Cast everything to scalar integer types.
2375  MVT ScalarTy = MVT::getIntegerVT(VecWidth);
2376  // The actual type of ValV may be different than ValTy (which is related
2377  // to the vector type).
2378  unsigned VW = ty(ValV).getSizeInBits();
2379  ValV = DAG.getBitcast(MVT::getIntegerVT(VW), ValV);
2380  VecV = DAG.getBitcast(ScalarTy, VecV);
2381  if (VW != VecWidth)
2382  ValV = DAG.getAnyExtOrTrunc(ValV, dl, ScalarTy);
2383 
2384  SDValue WidthV = DAG.getConstant(ValWidth, dl, MVT::i32);
2385  SDValue InsV;
2386 
2387  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(IdxV)) {
2388  unsigned W = C->getZExtValue() * ValWidth;
2389  SDValue OffV = DAG.getConstant(W, dl, MVT::i32);
2390  InsV = DAG.getNode(HexagonISD::INSERT, dl, ScalarTy,
2391  {VecV, ValV, WidthV, OffV});
2392  } else {
2393  if (ty(IdxV) != MVT::i32)
2394  IdxV = DAG.getZExtOrTrunc(IdxV, dl, MVT::i32);
2395  SDValue OffV = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, WidthV);
2396  InsV = DAG.getNode(HexagonISD::INSERT, dl, ScalarTy,
2397  {VecV, ValV, WidthV, OffV});
2398  }
2399 
2400  return DAG.getNode(ISD::BITCAST, dl, VecTy, InsV);
2401 }
2402 
2403 SDValue
2404 HexagonTargetLowering::expandPredicate(SDValue Vec32, const SDLoc &dl,
2405  SelectionDAG &DAG) const {
2406  assert(ty(Vec32).getSizeInBits() == 32);
2407  if (isUndef(Vec32))
2408  return DAG.getUNDEF(MVT::i64);
2409  return getInstr(Hexagon::S2_vsxtbh, dl, MVT::i64, {Vec32}, DAG);
2410 }
2411 
2412 SDValue
2413 HexagonTargetLowering::contractPredicate(SDValue Vec64, const SDLoc &dl,
2414  SelectionDAG &DAG) const {
2415  assert(ty(Vec64).getSizeInBits() == 64);
2416  if (isUndef(Vec64))
2417  return DAG.getUNDEF(MVT::i32);
2418  return getInstr(Hexagon::S2_vtrunehb, dl, MVT::i32, {Vec64}, DAG);
2419 }
2420 
2421 SDValue
2422 HexagonTargetLowering::getZero(const SDLoc &dl, MVT Ty, SelectionDAG &DAG)
2423  const {
2424  if (Ty.isVector()) {
2425  assert(Ty.isInteger() && "Only integer vectors are supported here");
2426  unsigned W = Ty.getSizeInBits();
2427  if (W <= 64)
2428  return DAG.getBitcast(Ty, DAG.getConstant(0, dl, MVT::getIntegerVT(W)));
2429  return DAG.getNode(HexagonISD::VZERO, dl, Ty);
2430  }
2431 
2432  if (Ty.isInteger())
2433  return DAG.getConstant(0, dl, Ty);
2434  if (Ty.isFloatingPoint())
2435  return DAG.getConstantFP(0.0, dl, Ty);
2436  llvm_unreachable("Invalid type for zero");
2437 }
2438 
2439 SDValue
2441  MVT VecTy = ty(Op);
2442  unsigned BW = VecTy.getSizeInBits();
2443  const SDLoc &dl(Op);
2445  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i)
2446  Ops.push_back(Op.getOperand(i));
2447 
2448  if (BW == 32)
2449  return buildVector32(Ops, dl, VecTy, DAG);
2450  if (BW == 64)
2451  return buildVector64(Ops, dl, VecTy, DAG);
2452 
2453  if (VecTy == MVT::v8i1 || VecTy == MVT::v4i1 || VecTy == MVT::v2i1) {
2454  // For each i1 element in the resulting predicate register, put 1
2455  // shifted by the index of the element into a general-purpose register,
2456  // then or them together and transfer it back into a predicate register.
2457  SDValue Rs[8];
2458  SDValue Z = getZero(dl, MVT::i32, DAG);
2459  // Always produce 8 bits, repeat inputs if necessary.
2460  unsigned Rep = 8 / VecTy.getVectorNumElements();
2461  for (unsigned i = 0; i != 8; ++i) {
2462  SDValue S = DAG.getConstant(1ull << i, dl, MVT::i32);
2463  Rs[i] = DAG.getSelect(dl, MVT::i32, Ops[i/Rep], S, Z);
2464  }
2465  for (ArrayRef<SDValue> A(Rs); A.size() != 1; A = A.drop_back(A.size()/2)) {
2466  for (unsigned i = 0, e = A.size()/2; i != e; ++i)
2467  Rs[i] = DAG.getNode(ISD::OR, dl, MVT::i32, Rs[2*i], Rs[2*i+1]);
2468  }
2469  // Move the value directly to a predicate register.
2470  return getInstr(Hexagon::C2_tfrrp, dl, VecTy, {Rs[0]}, DAG);
2471  }
2472 
2473  return SDValue();
2474 }
2475 
2476 SDValue
2478  SelectionDAG &DAG) const {
2479  MVT VecTy = ty(Op);
2480  const SDLoc &dl(Op);
2481  if (VecTy.getSizeInBits() == 64) {
2482  assert(Op.getNumOperands() == 2);
2483  return DAG.getNode(HexagonISD::COMBINE, dl, VecTy, Op.getOperand(1),
2484  Op.getOperand(0));
2485  }
2486 
2487  MVT ElemTy = VecTy.getVectorElementType();
2488  if (ElemTy == MVT::i1) {
2489  assert(VecTy == MVT::v2i1 || VecTy == MVT::v4i1 || VecTy == MVT::v8i1);
2490  MVT OpTy = ty(Op.getOperand(0));
2491  // Scale is how many times the operands need to be contracted to match
2492  // the representation in the target register.
2493  unsigned Scale = VecTy.getVectorNumElements() / OpTy.getVectorNumElements();
2494  assert(Scale == Op.getNumOperands() && Scale > 1);
2495 
2496  // First, convert all bool vectors to integers, then generate pairwise
2497  // inserts to form values of doubled length. Up until there are only
2498  // two values left to concatenate, all of these values will fit in a
2499  // 32-bit integer, so keep them as i32 to use 32-bit inserts.
2500  SmallVector<SDValue,4> Words[2];
2501  unsigned IdxW = 0;
2502 
2503  for (SDValue P : Op.getNode()->op_values()) {
2504  SDValue W = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, P);
2505  for (unsigned R = Scale; R > 1; R /= 2) {
2506  W = contractPredicate(W, dl, DAG);
2507  W = DAG.getNode(HexagonISD::COMBINE, dl, MVT::i64,
2508  DAG.getUNDEF(MVT::i32), W);
2509  }
2510  W = DAG.getTargetExtractSubreg(Hexagon::isub_lo, dl, MVT::i32, W);
2511  Words[IdxW].push_back(W);
2512  }
2513 
2514  while (Scale > 2) {
2515  SDValue WidthV = DAG.getConstant(64 / Scale, dl, MVT::i32);
2516  Words[IdxW ^ 1].clear();
2517 
2518  for (unsigned i = 0, e = Words[IdxW].size(); i != e; i += 2) {
2519  SDValue W0 = Words[IdxW][i], W1 = Words[IdxW][i+1];
2520  // Insert W1 into W0 right next to the significant bits of W0.
2522  {W0, W1, WidthV, WidthV});
2523  Words[IdxW ^ 1].push_back(T);
2524  }
2525  IdxW ^= 1;
2526  Scale /= 2;
2527  }
2528 
2529  // Another sanity check. At this point there should only be two words
2530  // left, and Scale should be 2.
2531  assert(Scale == 2 && Words[IdxW].size() == 2);
2532 
2534  Words[IdxW][1], Words[IdxW][0]);
2535  return DAG.getNode(HexagonISD::D2P, dl, VecTy, WW);
2536  }
2537 
2538  return SDValue();
2539 }
2540 
2541 SDValue
2543  SelectionDAG &DAG) const {
2544  SDValue Vec = Op.getOperand(0);
2545  MVT ElemTy = ty(Vec).getVectorElementType();
2546  return extractVector(Vec, Op.getOperand(1), SDLoc(Op), ElemTy, ty(Op), DAG);
2547 }
2548 
2549 SDValue
2551  SelectionDAG &DAG) const {
2552  return extractVector(Op.getOperand(0), Op.getOperand(1), SDLoc(Op),
2553  ty(Op), ty(Op), DAG);
2554 }
2555 
2556 SDValue
2558  SelectionDAG &DAG) const {
2559  return insertVector(Op.getOperand(0), Op.getOperand(1), Op.getOperand(2),
2560  SDLoc(Op), ty(Op).getVectorElementType(), DAG);
2561 }
2562 
2563 SDValue
2565  SelectionDAG &DAG) const {
2566  SDValue ValV = Op.getOperand(1);
2567  return insertVector(Op.getOperand(0), ValV, Op.getOperand(2),
2568  SDLoc(Op), ty(ValV), DAG);
2569 }
2570 
2571 bool
2573  // Assuming the caller does not have either a signext or zeroext modifier, and
2574  // only one value is accepted, any reasonable truncation is allowed.
2575  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
2576  return false;
2577 
2578  // FIXME: in principle up to 64-bit could be made safe, but it would be very
2579  // fragile at the moment: any support for multiple value returns would be
2580  // liable to disallow tail calls involving i64 -> iN truncation in many cases.
2581  return Ty1->getPrimitiveSizeInBits() <= 32;
2582 }
2583 
2584 SDValue
2586  LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
2587  unsigned ClaimAlign = LN->getAlignment();
2588  validateConstPtrAlignment(LN->getBasePtr(), SDLoc(Op), ClaimAlign);
2589  // Call LowerUnalignedLoad for all loads, it recognizes loads that
2590  // don't need extra aligning.
2591  return LowerUnalignedLoad(Op, DAG);
2592 }
2593 
2594 SDValue
2596  StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
2597  unsigned ClaimAlign = SN->getAlignment();
2598  SDValue Ptr = SN->getBasePtr();
2599  const SDLoc &dl(Op);
2600  validateConstPtrAlignment(Ptr, dl, ClaimAlign);
2601 
2602  MVT StoreTy = SN->getMemoryVT().getSimpleVT();
2603  unsigned NeedAlign = Subtarget.getTypeAlignment(StoreTy);
2604  if (ClaimAlign < NeedAlign)
2605  return expandUnalignedStore(SN, DAG);
2606  return Op;
2607 }
2608 
2609 SDValue
2611  const {
2612  LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
2613  MVT LoadTy = ty(Op);
2614  unsigned NeedAlign = Subtarget.getTypeAlignment(LoadTy);
2615  unsigned HaveAlign = LN->getAlignment();
2616  if (HaveAlign >= NeedAlign)
2617  return Op;
2618 
2619  const SDLoc &dl(Op);
2620  const DataLayout &DL = DAG.getDataLayout();
2621  LLVMContext &Ctx = *DAG.getContext();
2622  unsigned AS = LN->getAddressSpace();
2623 
2624  // If the load aligning is disabled or the load can be broken up into two
2625  // smaller legal loads, do the default (target-independent) expansion.
2626  bool DoDefault = false;
2627  // Handle it in the default way if this is an indexed load.
2628  if (!LN->isUnindexed())
2629  DoDefault = true;
2630 
2631  if (!AlignLoads) {
2632  if (allowsMemoryAccess(Ctx, DL, LN->getMemoryVT(), AS, HaveAlign))
2633  return Op;
2634  DoDefault = true;
2635  }
2636  if (!DoDefault && 2*HaveAlign == NeedAlign) {
2637  // The PartTy is the equivalent of "getLoadableTypeOfSize(HaveAlign)".
2638  MVT PartTy = HaveAlign <= 8 ? MVT::getIntegerVT(8*HaveAlign)
2639  : MVT::getVectorVT(MVT::i8, HaveAlign);
2640  DoDefault = allowsMemoryAccess(Ctx, DL, PartTy, AS, HaveAlign);
2641  }
2642  if (DoDefault) {
2643  std::pair<SDValue, SDValue> P = expandUnalignedLoad(LN, DAG);
2644  return DAG.getMergeValues({P.first, P.second}, dl);
2645  }
2646 
2647  // The code below generates two loads, both aligned as NeedAlign, and
2648  // with the distance of NeedAlign between them. For that to cover the
2649  // bits that need to be loaded (and without overlapping), the size of
2650  // the loads should be equal to NeedAlign. This is true for all loadable
2651  // types, but add an assertion in case something changes in the future.
2652  assert(LoadTy.getSizeInBits() == 8*NeedAlign);
2653 
2654  unsigned LoadLen = NeedAlign;
2655  SDValue Base = LN->getBasePtr();
2656  SDValue Chain = LN->getChain();
2657  auto BO = getBaseAndOffset(Base);
2658  unsigned BaseOpc = BO.first.getOpcode();
2659  if (BaseOpc == HexagonISD::VALIGNADDR && BO.second % LoadLen == 0)
2660  return Op;
2661 
2662  if (BO.second % LoadLen != 0) {
2663  BO.first = DAG.getNode(ISD::ADD, dl, MVT::i32, BO.first,
2664  DAG.getConstant(BO.second % LoadLen, dl, MVT::i32));
2665  BO.second -= BO.second % LoadLen;
2666  }
2667  SDValue BaseNoOff = (BaseOpc != HexagonISD::VALIGNADDR)
2668  ? DAG.getNode(HexagonISD::VALIGNADDR, dl, MVT::i32, BO.first,
2669  DAG.getConstant(NeedAlign, dl, MVT::i32))
2670  : BO.first;
2671  SDValue Base0 = DAG.getMemBasePlusOffset(BaseNoOff, BO.second, dl);
2672  SDValue Base1 = DAG.getMemBasePlusOffset(BaseNoOff, BO.second+LoadLen, dl);
2673 
2674  MachineMemOperand *WideMMO = nullptr;
2675  if (MachineMemOperand *MMO = LN->getMemOperand()) {
2676  MachineFunction &MF = DAG.getMachineFunction();
2677  WideMMO = MF.getMachineMemOperand(MMO->getPointerInfo(), MMO->getFlags(),
2678  2*LoadLen, LoadLen, MMO->getAAInfo(), MMO->getRanges(),
2679  MMO->getSyncScopeID(), MMO->getOrdering(),
2680  MMO->getFailureOrdering());
2681  }
2682 
2683  SDValue Load0 = DAG.getLoad(LoadTy, dl, Chain, Base0, WideMMO);
2684  SDValue Load1 = DAG.getLoad(LoadTy, dl, Chain, Base1, WideMMO);
2685 
2686  SDValue Aligned = DAG.getNode(HexagonISD::VALIGN, dl, LoadTy,
2687  {Load1, Load0, BaseNoOff.getOperand(0)});
2688  SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2689  Load0.getValue(1), Load1.getValue(1));
2690  SDValue M = DAG.getMergeValues({Aligned, NewChain}, dl);
2691  return M;
2692 }
2693 
2694 SDValue
2696  const SDLoc &dl(Op);
2697  unsigned Opc = Op.getOpcode();
2698  SDValue X = Op.getOperand(0), Y = Op.getOperand(1), C = Op.getOperand(2);
2699 
2700  if (Opc == ISD::ADDCARRY)
2701  return DAG.getNode(HexagonISD::ADDC, dl, Op.getNode()->getVTList(),
2702  { X, Y, C });
2703 
2704  EVT CarryTy = C.getValueType();
2705  SDValue SubC = DAG.getNode(HexagonISD::SUBC, dl, Op.getNode()->getVTList(),
2706  { X, Y, DAG.getLogicalNOT(dl, C, CarryTy) });
2707  SDValue Out[] = { SubC.getValue(0),
2708  DAG.getLogicalNOT(dl, SubC.getValue(1), CarryTy) };
2709  return DAG.getMergeValues(Out, dl);
2710 }
2711 
2712 SDValue
2714  SDValue Chain = Op.getOperand(0);
2715  SDValue Offset = Op.getOperand(1);
2716  SDValue Handler = Op.getOperand(2);
2717  SDLoc dl(Op);
2718  auto PtrVT = getPointerTy(DAG.getDataLayout());
2719 
2720  // Mark function as containing a call to EH_RETURN.
2721  HexagonMachineFunctionInfo *FuncInfo =
2723  FuncInfo->setHasEHReturn();
2724 
2725  unsigned OffsetReg = Hexagon::R28;
2726 
2727  SDValue StoreAddr =
2728  DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getRegister(Hexagon::R30, PtrVT),
2729  DAG.getIntPtrConstant(4, dl));
2730  Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
2731  Chain = DAG.getCopyToReg(Chain, dl, OffsetReg, Offset);
2732 
2733  // Not needed we already use it as explict input to EH_RETURN.
2734  // MF.getRegInfo().addLiveOut(OffsetReg);
2735 
2736  return DAG.getNode(HexagonISD::EH_RETURN, dl, MVT::Other, Chain);
2737 }
2738 
2739 SDValue
2741  unsigned Opc = Op.getOpcode();
2742 
2743  // Handle INLINEASM first.
2744  if (Opc == ISD::INLINEASM)
2745  return LowerINLINEASM(Op, DAG);
2746 
2747  if (isHvxOperation(Op)) {
2748  // If HVX lowering returns nothing, try the default lowering.
2749  if (SDValue V = LowerHvxOperation(Op, DAG))
2750  return V;
2751  }
2752 
2753  switch (Opc) {
2754  default:
2755 #ifndef NDEBUG
2756  Op.getNode()->dumpr(&DAG);
2757  if (Opc > HexagonISD::OP_BEGIN && Opc < HexagonISD::OP_END)
2758  errs() << "Error: check for a non-legal type in this operation\n";
2759 #endif
2760  llvm_unreachable("Should not custom lower this!");
2761  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
2762  case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, DAG);
2763  case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
2764  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
2765  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
2766  case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
2767  case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
2768  case ISD::BITCAST: return LowerBITCAST(Op, DAG);
2769  case ISD::LOAD: return LowerLoad(Op, DAG);
2770  case ISD::STORE: return LowerStore(Op, DAG);
2771  case ISD::ADDCARRY:
2772  case ISD::SUBCARRY: return LowerAddSubCarry(Op, DAG);
2773  case ISD::SRA:
2774  case ISD::SHL:
2775  case ISD::SRL: return LowerVECTOR_SHIFT(Op, DAG);
2776  case ISD::ROTL: return LowerROTL(Op, DAG);
2777  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
2778  case ISD::JumpTable: return LowerJumpTable(Op, DAG);
2779  case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
2780  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
2781  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
2782  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
2783  case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
2784  case ISD::GlobalAddress: return LowerGLOBALADDRESS(Op, DAG);
2785  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
2786  case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG);
2787  case ISD::VASTART: return LowerVASTART(Op, DAG);
2788  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
2789  case ISD::SETCC: return LowerSETCC(Op, DAG);
2790  case ISD::VSELECT: return LowerVSELECT(Op, DAG);
2791  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
2792  case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
2793  case ISD::PREFETCH: return LowerPREFETCH(Op, DAG);
2794  case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG);
2795  break;
2796  }
2797 
2798  return SDValue();
2799 }
2800 
2801 void
2804  SelectionDAG &DAG) const {
2805  // We are only custom-lowering stores to verify the alignment of the
2806  // address if it is a compile-time constant. Since a store can be modified
2807  // during type-legalization (the value being stored may need legalization),
2808  // return empty Results here to indicate that we don't really make any
2809  // changes in the custom lowering.
2810  if (N->getOpcode() != ISD::STORE)
2811  return TargetLowering::LowerOperationWrapper(N, Results, DAG);
2812 }
2813 
2814 void
2817  SelectionDAG &DAG) const {
2818  const SDLoc &dl(N);
2819  switch (N->getOpcode()) {
2820  case ISD::SRL:
2821  case ISD::SRA:
2822  case ISD::SHL:
2823  return;
2824  case ISD::BITCAST:
2825  // Handle a bitcast from v8i1 to i8.
2826  if (N->getValueType(0) == MVT::i8) {
2827  SDValue P = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32,
2828  N->getOperand(0), DAG);
2829  Results.push_back(P);
2830  }
2831  break;
2832  }
2833 }
2834 
2835 /// Returns relocation base for the given PIC jumptable.
2836 SDValue
2838  SelectionDAG &DAG) const {
2839  int Idx = cast<JumpTableSDNode>(Table)->getIndex();
2840  EVT VT = Table.getValueType();
2842  return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Table), VT, T);
2843 }
2844 
2845 //===----------------------------------------------------------------------===//
2846 // Inline Assembly Support
2847 //===----------------------------------------------------------------------===//
2848 
2851  if (Constraint.size() == 1) {
2852  switch (Constraint[0]) {
2853  case 'q':
2854  case 'v':
2855  if (Subtarget.useHVXOps())
2856  return C_RegisterClass;
2857  break;
2858  case 'a':
2859  return C_RegisterClass;
2860  default:
2861  break;
2862  }
2863  }
2864  return TargetLowering::getConstraintType(Constraint);
2865 }
2866 
2867 std::pair<unsigned, const TargetRegisterClass*>
2869  const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
2870 
2871  if (Constraint.size() == 1) {
2872  switch (Constraint[0]) {
2873  case 'r': // R0-R31
2874  switch (VT.SimpleTy) {
2875  default:
2876  return {0u, nullptr};
2877  case MVT::i1:
2878  case MVT::i8:
2879  case MVT::i16:
2880  case MVT::i32:
2881  case MVT::f32:
2882  return {0u, &Hexagon::IntRegsRegClass};
2883  case MVT::i64:
2884  case MVT::f64:
2885  return {0u, &Hexagon::DoubleRegsRegClass};
2886  }
2887  break;
2888  case 'a': // M0-M1
2889  if (VT != MVT::i32)
2890  return {0u, nullptr};
2891  return {0u, &Hexagon::ModRegsRegClass};
2892  case 'q': // q0-q3
2893  switch (VT.getSizeInBits()) {
2894  default:
2895  return {0u, nullptr};
2896  case 512:
2897  case 1024:
2898  return {0u, &Hexagon::HvxQRRegClass};
2899  }
2900  break;
2901  case 'v': // V0-V31
2902  switch (VT.getSizeInBits()) {
2903  default:
2904  return {0u, nullptr};
2905  case 512:
2906  return {0u, &Hexagon::HvxVRRegClass};
2907  case 1024:
2908  if (Subtarget.hasV60Ops() && Subtarget.useHVX128BOps())
2909  return {0u, &Hexagon::HvxVRRegClass};
2910  return {0u, &Hexagon::HvxWRRegClass};
2911  case 2048:
2912  return {0u, &Hexagon::HvxWRRegClass};
2913  }
2914  break;
2915  default:
2916  return {0u, nullptr};
2917  }
2918  }
2919 
2920  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
2921 }
2922 
2923 /// isFPImmLegal - Returns true if the target can instruction select the
2924 /// specified FP immediate natively. If false, the legalizer will
2925 /// materialize the FP immediate as a load from a constant pool.
2927  return true;
2928 }
2929 
2930 /// isLegalAddressingMode - Return true if the addressing mode represented by
2931 /// AM is legal for this target, for a load/store of the specified type.
2933  const AddrMode &AM, Type *Ty,
2934  unsigned AS, Instruction *I) const {
2935  if (Ty->isSized()) {
2936  // When LSR detects uses of the same base address to access different
2937  // types (e.g. unions), it will assume a conservative type for these
2938  // uses:
2939  // LSR Use: Kind=Address of void in addrspace(4294967295), ...
2940  // The type Ty passed here would then be "void". Skip the alignment
2941  // checks, but do not return false right away, since that confuses
2942  // LSR into crashing.
2943  unsigned A = DL.getABITypeAlignment(Ty);
2944  // The base offset must be a multiple of the alignment.
2945  if ((AM.BaseOffs % A) != 0)
2946  return false;
2947  // The shifted offset must fit in 11 bits.
2948  if (!isInt<11>(AM.BaseOffs >> Log2_32(A)))
2949  return false;
2950  }
2951 
2952  // No global is ever allowed as a base.
2953  if (AM.BaseGV)
2954  return false;
2955 
2956  int Scale = AM.Scale;
2957  if (Scale < 0)
2958  Scale = -Scale;
2959  switch (Scale) {
2960  case 0: // No scale reg, "r+i", "r", or just "i".
2961  break;
2962  default: // No scaled addressing mode.
2963  return false;
2964  }
2965  return true;
2966 }
2967 
2968 /// Return true if folding a constant offset with the given GlobalAddress is
2969 /// legal. It is frequently not legal in PIC relocation models.
2971  const {
2972  return HTM.getRelocationModel() == Reloc::Static;
2973 }
2974 
2975 /// isLegalICmpImmediate - Return true if the specified immediate is legal
2976 /// icmp immediate, that is the target has icmp instructions which can compare
2977 /// a register against the immediate without having to materialize the
2978 /// immediate into a register.
2980  return Imm >= -512 && Imm <= 511;
2981 }
2982 
2983 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
2984 /// for tail call optimization. Targets which want to do tail call
2985 /// optimization should implement this function.
2987  SDValue Callee,
2988  CallingConv::ID CalleeCC,
2989  bool IsVarArg,
2990  bool IsCalleeStructRet,
2991  bool IsCallerStructRet,
2992  const SmallVectorImpl<ISD::OutputArg> &Outs,
2993  const SmallVectorImpl<SDValue> &OutVals,
2995  SelectionDAG& DAG) const {
2996  const Function &CallerF = DAG.getMachineFunction().getFunction();
2997  CallingConv::ID CallerCC = CallerF.getCallingConv();
2998  bool CCMatch = CallerCC == CalleeCC;
2999 
3000  // ***************************************************************************
3001  // Look for obvious safe cases to perform tail call optimization that do not
3002  // require ABI changes.
3003  // ***************************************************************************
3004 
3005  // If this is a tail call via a function pointer, then don't do it!
3006  if (!isa<GlobalAddressSDNode>(Callee) &&
3007  !isa<ExternalSymbolSDNode>(Callee)) {
3008  return false;
3009  }
3010 
3011  // Do not optimize if the calling conventions do not match and the conventions
3012  // used are not C or Fast.
3013  if (!CCMatch) {
3014  bool R = (CallerCC == CallingConv::C || CallerCC == CallingConv::Fast);
3015  bool E = (CalleeCC == CallingConv::C || CalleeCC == CallingConv::Fast);
3016  // If R & E, then ok.
3017  if (!R || !E)
3018  return false;
3019  }
3020 
3021  // Do not tail call optimize vararg calls.
3022  if (IsVarArg)
3023  return false;
3024 
3025  // Also avoid tail call optimization if either caller or callee uses struct
3026  // return semantics.
3027  if (IsCalleeStructRet || IsCallerStructRet)
3028  return false;
3029 
3030  // In addition to the cases above, we also disable Tail Call Optimization if
3031  // the calling convention code that at least one outgoing argument needs to
3032  // go on the stack. We cannot check that here because at this point that
3033  // information is not available.
3034  return true;
3035 }
3036 
3037 /// Returns the target specific optimal type for load and store operations as
3038 /// a result of memset, memcpy, and memmove lowering.
3039 ///
3040 /// If DstAlign is zero that means it's safe to destination alignment can
3041 /// satisfy any constraint. Similarly if SrcAlign is zero it means there isn't
3042 /// a need to check it against alignment requirement, probably because the
3043 /// source does not need to be loaded. If 'IsMemset' is true, that means it's
3044 /// expanding a memset. If 'ZeroMemset' is true, that means it's a memset of
3045 /// zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
3046 /// does not need to be loaded. It returns EVT::Other if the type should be
3047 /// determined using generic target-independent logic.
3049  unsigned DstAlign, unsigned SrcAlign, bool IsMemset, bool ZeroMemset,
3050  bool MemcpyStrSrc, MachineFunction &MF) const {
3051 
3052  auto Aligned = [](unsigned GivenA, unsigned MinA) -> bool {
3053  return (GivenA % MinA) == 0;
3054  };
3055 
3056  if (Size >= 8 && Aligned(DstAlign, 8) && (IsMemset || Aligned(SrcAlign, 8)))
3057  return MVT::i64;
3058  if (Size >= 4 && Aligned(DstAlign, 4) && (IsMemset || Aligned(SrcAlign, 4)))
3059  return MVT::i32;
3060  if (Size >= 2 && Aligned(DstAlign, 2) && (IsMemset || Aligned(SrcAlign, 2)))
3061  return MVT::i16;
3062 
3063  return MVT::Other;
3064 }
3065 
3067  unsigned AS, unsigned Align, bool *Fast) const {
3068  if (Fast)
3069  *Fast = false;
3070  return Subtarget.isHVXVectorType(VT.getSimpleVT());
3071 }
3072 
3073 std::pair<const TargetRegisterClass*, uint8_t>
3074 HexagonTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
3075  MVT VT) const {
3076  if (Subtarget.isHVXVectorType(VT, true)) {
3077  unsigned BitWidth = VT.getSizeInBits();
3078  unsigned VecWidth = Subtarget.getVectorLength() * 8;
3079 
3080  if (VT.getVectorElementType() == MVT::i1)
3081  return std::make_pair(&Hexagon::HvxQRRegClass, 1);
3082  if (BitWidth == VecWidth)
3083  return std::make_pair(&Hexagon::HvxVRRegClass, 1);
3084  assert(BitWidth == 2 * VecWidth);
3085  return std::make_pair(&Hexagon::HvxWRRegClass, 1);
3086  }
3087 
3089 }
3090 
3092  ISD::LoadExtType ExtTy, EVT NewVT) const {
3093  // TODO: This may be worth removing. Check regression tests for diffs.
3094  if (!TargetLoweringBase::shouldReduceLoadWidth(Load, ExtTy, NewVT))
3095  return false;
3096 
3097  auto *L = cast<LoadSDNode>(Load);
3098  std::pair<SDValue,int> BO = getBaseAndOffset(L->getBasePtr());
3099  // Small-data object, do not shrink.
3100  if (BO.first.getOpcode() == HexagonISD::CONST32_GP)
3101  return false;
3102  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(BO.first)) {
3103  auto &HTM = static_cast<const HexagonTargetMachine&>(getTargetMachine());
3104  const auto *GO = dyn_cast_or_null<const GlobalObject>(GA->getGlobal());
3105  return !GO || !HTM.getObjFileLowering()->isGlobalInSmallSection(GO, HTM);
3106  }
3107  return true;
3108 }
3109 
3111  AtomicOrdering Ord) const {
3112  BasicBlock *BB = Builder.GetInsertBlock();
3113  Module *M = BB->getParent()->getParent();
3114  Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
3115  unsigned SZ = Ty->getPrimitiveSizeInBits();
3116  assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported");
3119  Value *Fn = Intrinsic::getDeclaration(M, IntID);
3120  return Builder.CreateCall(Fn, Addr, "larx");
3121 }
3122 
3123 /// Perform a store-conditional operation to Addr. Return the status of the
3124 /// store. This should be 0 if the store succeeded, non-zero otherwise.
3126  Value *Val, Value *Addr, AtomicOrdering Ord) const {
3127  BasicBlock *BB = Builder.GetInsertBlock();
3128  Module *M = BB->getParent()->getParent();
3129  Type *Ty = Val->getType();
3130  unsigned SZ = Ty->getPrimitiveSizeInBits();
3131  assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported");
3134  Value *Fn = Intrinsic::getDeclaration(M, IntID);
3135  Value *Call = Builder.CreateCall(Fn, {Addr, Val}, "stcx");
3136  Value *Cmp = Builder.CreateICmpEQ(Call, Builder.getInt32(0), "");
3137  Value *Ext = Builder.CreateZExt(Cmp, Type::getInt32Ty(M->getContext()));
3138  return Ext;
3139 }
3140 
3143  // Do not expand loads and stores that don't exceed 64 bits.
3144  return LI->getType()->getPrimitiveSizeInBits() > 64
3147 }
3148 
3150  // Do not expand loads and stores that don't exceed 64 bits.
3151  return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64;
3152 }
3153 
3156  AtomicCmpXchgInst *AI) const {
3157  const DataLayout &DL = AI->getModule()->getDataLayout();
3158  unsigned Size = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
3159  if (Size >= 4 && Size <= 8)
3162 }
virtual std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const
Return the largest legal super-reg register class of the register class for the specified type and it...
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
unsigned getRegisterByName(const char *RegName, EVT VT, SelectionDAG &DAG) const override
Return the register ID of the name passed in.
bool isMachineConstantPoolEntry() const
bool isGlobalInSmallSection(const GlobalObject *GO, const TargetMachine &TM) const
Return true if this global value should be placed into small data/bss section.
Type * getVectorElementType() const
Definition: Type.h:371
void setFrameAddressIsTaken(bool T)
uint64_t CallInst * C
SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:571
bool isFPImmLegal(const APFloat &Imm, EVT VT) const override
isFPImmLegal - Returns true if the target can instruction select the specified FP immediate natively...
Value * getValueOperand()
Definition: Instructions.h:410
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set, or Regs.size() if they are all allocated.
static MVT getIntegerVT(unsigned BitWidth)
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
static SDValue LowerCallResult(SDValue Chain, SDValue InFlag, const SmallVectorImpl< CCValAssign > &RVLocs, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals)
LowerCallResult - Lower the result values of a call into the appropriate copies out of appropriate ph...
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:111
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
Definition: ISDOpcodes.h:594
EVT getValueType() const
Return the ValueType of the referenced return value.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
bool isInteger() const
Return true if this is an integer or a vector integer type.
SDValue LowerAddSubCarry(SDValue Op, SelectionDAG &DAG) const
static void commuteMask(MutableArrayRef< int > Mask)
Change values in a shuffle permute mask assuming the two vector operands have swapped position...
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
bool isUndef() const
raw_ostream & errs()
This returns a reference to a raw_ostream for standard error.
C - The default llvm calling convention, compatible with C.
Definition: CallingConv.h:35
SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const
const GlobalValue * getGlobal() const
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1563
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
Definition: ArrayRef.h:212
#define R4(n)
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
LLVMContext & Context
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
Definition: SelectionDAG.h:937
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
static bool CC_SkipOdd(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd)...
Definition: SelectionDAG.h:836
unsigned getFrameRegister(const MachineFunction &MF) const override
bool isLegalICmpImmediate(int64_t Imm) const override
isLegalICmpImmediate - Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructions which can compare a register against the immediate without having to materialize the immediate into a register.
ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR (an vector value) starting with the ...
Definition: ISDOpcodes.h:358
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:650
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:140
This class represents lattice values for constants.
Definition: AllocatorList.h:24
static MVT getVectorVT(MVT VT, unsigned NumElements)
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
Definition: ISDOpcodes.h:367
A Module instance is used to store all the information related to an LLVM module. ...
Definition: Module.h:65
bool isSized(SmallPtrSetImpl< Type *> *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:265
an instruction that atomically checks whether a specified value is in a memory location, and, if it is, stores a new value there.
Definition: Instructions.h:529
HexagonTargetObjectFile * getObjFileLowering() const override
FormattedNumber format_hex(uint64_t N, unsigned Width, bool Upper=false)
format_hex - Output N as a fixed width hexadecimal.
Definition: Format.h:186
bool isVector() const
Return true if this is a vector value type.
bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
Returns true by value, base pointer and offset pointer and addressing mode by reference if this node ...
void addLiveIn(unsigned Reg, unsigned vreg=0)
addLiveIn - Add the specified register as a live-in.
const SDValue & getBasePtr() const
static cl::opt< bool > EmitJumpTables("hexagon-emit-jump-tables", cl::init(true), cl::Hidden, cl::desc("Control jump table emission on Hexagon target"))
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool isFMAFasterThanFMulAndFAdd(EVT) const override
Return true if an FMA operation is faster than a pair of mul and add instructions.
unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
Definition: StringRef.h:138
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain...
Definition: ISDOpcodes.h:699
SDVTList getVTList() const
This class represents a function call, abstracting a target machine&#39;s calling convention.
unsigned Reg
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:253
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
Definition: ISDOpcodes.h:251
unsigned getVectorNumElements() const
ArrayRef< MVT > getHVXElementTypes() const
unsigned getStoreSizeInBits() const
Return the number of bits overwritten by a store of the specified value type.
const SDValue & getChain() const
Function Alias Analysis Results
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:705
unsigned getAlignment() const
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
Hexagon target-specific information for each MachineFunction.
SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const
unsigned second
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1186
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
unsigned const TargetRegisterInfo * TRI
SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const
A debug info location.
Definition: DebugLoc.h:34
F(f)
Type * getStructElementType(unsigned N) const
Definition: DerivedTypes.h:333
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS)
Helper function to make it easier to build Select's if you just have operands and don't want to check...
Definition: SelectionDAG.h:950
SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const
An instruction for reading from memory.
Definition: Instructions.h:168
SDNode * getNode() const
get the SDNode which holds the desired result
#define R2(n)
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned char TargetFlags=0)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:230
AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass...
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
Same for subtraction.
Definition: ISDOpcodes.h:254
bool isOperationLegalOrCustom(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1 at the ...
Definition: ISDOpcodes.h:353
The address of the GOT.
Definition: ISDOpcodes.h:66
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
Definition: ISDOpcodes.h:781
#define HEXAGON_LRFP_SIZE
SDValue LowerToTLSLocalExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:435
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition: ISDOpcodes.h:159
bool isMemLoc() const
unsigned getAddressSpace() const
Return the address space for the associated pointer.
SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset, const SDLoc &DL)
Returns sum of the base pointer and offset.
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
Definition: Function.h:579
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:210
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1135
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
Definition: ISDOpcodes.h:456
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
The address of a basic block.
Definition: Constants.h:840
A description of a memory reference used in the backend.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
static Value * returnEdge(const PHINode *PN, Value *IntrBaseVal)
Value * emitStoreConditional(IRBuilder<> &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
const DataLayout & getDataLayout() const
Get the data layout for the module&#39;s target platform.
Definition: Module.cpp:371
SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const
Shift and rotation operations.
Definition: ISDOpcodes.h:410
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:202
LLVMContext & getContext() const
Get the global data context.
Definition: Module.h:244
Base class for LoadSDNode and StoreSDNode.
bool shouldExpandBuildVectorWithShuffles(EVT VT, unsigned DefinedValues) const override
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth...
Definition: ISDOpcodes.h:393
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
const HexagonRegisterInfo * getRegisterInfo() const override
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition: ISDOpcodes.h:191
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:197
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
static Value * getUnderLyingObjectForBrevLdIntr(Value *V)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:743
SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const
void setCondCodeAction(ISD::CondCode CC, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn&#39;t supported on the target and indicate what to d...
SimpleValueType SimpleTy
InstrTy * getInstruction() const
Definition: CallSite.h:92
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:460
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:401
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
Definition: ISDOpcodes.h:73
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE R Default(T Value)
Definition: StringSwitch.h:203
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
LocInfo getLocInfo() const
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
zlib-gnu style compression
This file implements a class to represent arbitrary precision integral constant values and operations...
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:667
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
bool isShuffleMaskLegal(ArrayRef< int > Mask, EVT VT) const override
Targets can use this to indicate that they only support some VECTOR_SHUFFLE operations, those with specific masks.
SmallVector< ISD::InputArg, 32 > Ins
AtomicOrdering
Atomic ordering for LLVM&#39;s memory model.
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:695
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
unsigned getSizeInBits() const
int64_t getSExtValue() const
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
static Value * getBrevLdObject(Value *V)
Fast - This calling convention attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:43
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:292
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1575
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:245
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:398
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:478
SDValue getRegisterMask(const uint32_t *RegMask)
SDValue LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const
#define T
BasicBlock * GetInsertBlock() const
Definition: IRBuilder.h:121
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
Definition: ISDOpcodes.h:429
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:201
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
SmallVector< ISD::OutputArg, 32 > Outs
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:33
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition: Constants.h:138
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
Definition: SelectionDAG.h:852
Reloc::Model getRelocationModel() const
Returns the code generation relocation model.
An instruction for storing to memory.
Definition: Instructions.h:321
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able emit the call instruction as a tail call.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:959
std::pair< SDValue, SDValue > expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Expands an unaligned load to 2 half-size loads for an integer, and possibly more for vectors...
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:1659
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
Definition: ISDOpcodes.h:747
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
static cl::opt< int > MinimumJumpTables("minimum-jump-tables", cl::Hidden, cl::ZeroOrMore, cl::init(5), cl::desc("Set minimum jump tables"))
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:151
amdgpu Simplify well known AMD library false Value * Callee
Function * getDeclaration(Module *M, ID id, ArrayRef< Type *> Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1020
MVT getVectorElementType() const
SDValue LowerToTLSInitialExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Value * getOperand(unsigned i) const
Definition: User.h:170
Analysis containing CSE Info
Definition: CSEInfo.cpp:21
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
unsigned getByValSize() const
This class is used to represent ISD::STORE nodes.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:524
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:118
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the specified, possibly variable...
Definition: ISDOpcodes.h:327
The memory access is volatile.
SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const
bool isNegative() const
Determine sign of this APInt.
Definition: APInt.h:364
#define P(N)
const SDValue & getBasePtr() const
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:43
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:423
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:169
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition: ISDOpcodes.h:166
* if(!EatIfPresent(lltok::kw_thread_local)) return false
ParseOptionalThreadLocal := /*empty.
unsigned const MachineRegisterInfo * MRI
std::size_t countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1...
Definition: MathExtras.h:120
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Definition: ArrayRef.h:291
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:429
Machine Value Type.
LLVM Basic Block Representation.
Definition: BasicBlock.h:58
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:46
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:69
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
Simple binary floating point operators.
Definition: ISDOpcodes.h:283
static bool isBrevLdIntrinsic(const Value *Inst)
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:149
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This is an important base class in LLVM.
Definition: Constant.h:42
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE...
Definition: ISDOpcodes.h:728
iterator_range< value_op_iterator > op_values() const
const SDValue & getOperand(unsigned Num) const
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Definition: ISDOpcodes.h:934
static cl::opt< int > MaxStoresPerMemmoveOptSizeCL("max-store-memmove-Os", cl::Hidden, cl::ZeroOrMore, cl::init(4), cl::desc("Max #stores to inline memmove"))
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL...
Definition: ISDOpcodes.h:332
#define H(x, y, z)
Definition: MD5.cpp:57
bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns true if the given (atomic) store should be expanded by the IR-level AtomicExpand pass into an...
SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG) const
OperandFlags
These are flags set on operands, but should be considered private, all access should go through the M...
Definition: MCInstrDesc.h:41
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Definition: DerivedTypes.h:139
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:264
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
Definition: SelectionDAG.h:824
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
#define HEXAGON_GOT_SYM_NAME
static mvt_range fp_valuetypes()
static unsigned getNumOperandRegisters(unsigned Flag)
getNumOperandRegisters - Extract the number of registers field from the inline asm operand flag...
Definition: InlineAsm.h:336
SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const
static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
static cl::opt< bool > AlignLoads("hexagon-align-loads", cl::Hidden, cl::init(false), cl::desc("Rewrite unaligned loads as a pair of aligned loads"))
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:767
const APInt & getAPIntValue() const
static unsigned getKind(unsigned Flags)
Definition: InlineAsm.h:325
std::string getEVTString() const
This function returns value type as a string, e.g. "i32".
Definition: ValueTypes.cpp:115
void setPrefFunctionAlignment(unsigned Align)
Set the target's preferred function alignment.
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition: ISDOpcodes.h:57
static mvt_range vector_valuetypes()
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1839
Class to represent integer types.
Definition: DerivedTypes.h:40
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:281
Constant Vector Declarations.
Definition: Constants.h:500
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
Definition: ISDOpcodes.h:719
auto find_if(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range))
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1214
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
unsigned MaxStoresPerMemmove
Specify maximum bytes of store instructions per memmove call.
Extended Value Type.
Definition: ValueTypes.h:34
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
This structure contains all information that is necessary for lowering calls.
size_t size() const
Definition: SmallVector.h:53
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, const SDLoc &dl)
CreateCopyOfByValArgument - Make a copy of an aggregate at address specified by "Src" to address "Dst...
auto find(R &&Range, const T &Val) -> decltype(adl_begin(Range))
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1207
const TargetMachine & getTargetMachine() const
This class contains a discriminated union of information about pointers in memory operands...
std::string & str()
Flushes the stream contents to the target string and returns the string's reference.
Definition: raw_ostream.h:499
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
SDValue LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals, const SmallVectorImpl< SDValue > &OutVals, SDValue Callee) const
LowerCallResult - Lower the result values of an ISD::CALL into the appropriate copies out of appropri...
SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const override
Returns relocation base for the given PIC jumptable.
bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT) const override
Return true if it is profitable to reduce a load to a smaller type.
SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value...
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands...
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
unsigned first
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array...
The memory access writes data.
const APFloat & getValueAPF() const
Definition: Constants.h:303
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type...
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
SDValue getTargetConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offset=0, unsigned char TargetFlags=0)
Definition: SelectionDAG.h:639
SDValue LowerStore(SDValue Op, SelectionDAG &DAG) const
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned char TargetFlags=0)
Definition: SelectionDAG.h:633
TokenFactor - This node takes multiple tokens as input and produces a single token result...
Definition: ISDOpcodes.h:50
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
void dump() const
Dump this node, for debugging.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:240
bool isHVXVectorType(MVT VecTy, bool IncludeBool=false) const
unsigned getNumOperands() const
Definition: User.h:192
CCState - This class holds information needed while lowering arguments and return values...
virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT) const
Return true if it is profitable to reduce a load to a smaller type.
static unsigned getIntrinsicID(const SDNode *N)
This is the shared class of boolean and integer constants.
Definition: Constants.h:84
auto size(R &&Range, typename std::enable_if< std::is_same< typename std::iterator_traits< decltype(Range.begin())>::iterator_category, std::random_access_iterator_tag >::value, void >::type *=nullptr) -> decltype(std::distance(Range.begin(), Range.end()))
Get the size of a range.
Definition: STLExtras.h:1167
SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition: ISDOpcodes.h:339
SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:213
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:222
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:847
Module.h This file contains the declarations for the Module class.
const DebugLoc & getDebugLoc() const
CCValAssign - Represent assignment of one arg/retval to a location.
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array...
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
constexpr size_t array_lengthof(T(&)[N])
Find the length of an array.
Definition: STLExtras.h:1044
iterator end() const
Definition: ArrayRef.h:138
unsigned getABITypeAlignment(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
Definition: DataLayout.cpp:730
const DataFlowGraph & G
Definition: RDFGraph.cpp:211
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:413
MO_PCREL - On a symbol operand, indicates a PC-relative relocation Used for computing a global addres...
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition: IRBuilder.h:307
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const
const Constant * getConstVal() const
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:622
Represents one node in the SelectionDAG.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:679
const Function & getFunction() const
Return the LLVM function that this machine code represents.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:133
static mvt_range integer_valuetypes()
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:539
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:941
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:164
SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
Definition: Instruction.cpp:56
void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
EVT getMemoryVT() const
Return the type of the in-memory value.
Class for arbitrary precision integers.
Definition: APInt.h:70
unsigned getByValAlign() const
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:420
void setMinFunctionAlignment(unsigned Align)
Set the target's minimum function alignment (in log2(bytes))
void setPrefLoopAlignment(unsigned Align)
Set the target's preferred loop alignment.
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:468
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition: ISDOpcodes.h:471
bool isTailCall() const
AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass...
SDValue GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA, SDValue InFlag, EVT PtrVT, unsigned ReturnReg, unsigned char OperandFlags) const
SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const
SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const
Expands an unaligned store to 2 half-size stores for integer values, and possibly more for vectors...
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition: ISDOpcodes.h:312
static cl::opt< int > MaxStoresPerMemsetCL("max-store-memset", cl::Hidden, cl::ZeroOrMore, cl::init(8), cl::desc("Max #stores to inline memset"))
SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const
amdgpu Simplify well known AMD library false Value Value * Arg
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:638
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
Definition: ISDOpcodes.h:724
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned char TargetFlags=0)
Definition: SelectionDAG.h:673
SmallVector< SDValue, 32 > OutVals
static cl::opt< bool > EnableHexSDNodeSched("enable-hexagon-sdnode-sched", cl::Hidden, cl::ZeroOrMore, cl::init(false), cl::desc("Enable Hexagon SDNode scheduling"))
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
unsigned getVectorLength() const
void ensureMaxAlignment(unsigned Align)
Make sure the function is at least Align bytes aligned.
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:387
MO_GOT - Indicates a GOT-relative relocation.
SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const
unsigned getTypeAlignment(MVT Ty) const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
Definition: SelectionDAG.h:705
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
static IntegerType * getInt32Ty(LLVMContext &C)
Definition: Type.cpp:176
unsigned getLocMemOffset() const
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition: ISDOpcodes.h:206
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:56
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition: ISDOpcodes.h:486
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition: ArrayRef.h:188
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
Definition: ISDOpcodes.h:614
PointerUnion< const Value *, const PseudoSourceValue * > ptrVal
SDValue LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation.
Definition: InstrTypes.h:1181
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:107
SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const
#define I(x, y, z)
Definition: MD5.cpp:58
#define N
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace, unsigned Align, bool *Fast) const override
Determine if the target supports unaligned memory accesses.
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
const TargetSubtargetInfo & getSubtarget() const
Definition: SelectionDAG.h:403
static cl::opt< int > MaxStoresPerMemcpyOptSizeCL("max-store-memcpy-Os", cl::Hidden, cl::ZeroOrMore, cl::init(4), cl::desc("Max #stores to inline memcpy"))
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
unsigned MaxStoresPerMemmoveOptSize
Maximum number of store instructions that may be substituted for a call to memmove, used for functions with OptSize attribute.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition: Constants.h:193
Value * emitLoadLinked(IRBuilder<> &Builder, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type...
unsigned MaxStoresPerMemcpyOptSize
Maximum number of store operations that may be substituted for a call to memcpy, used for functions w...
void setStackPointerRegisterToSaveRestore(unsigned R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:323
uint32_t Size
Definition: Profile.cpp:47
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value *> Args=None, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:1974
unsigned getOpcode() const
FSINCOS - Compute both fsin and fcos as a single operation.
Definition: ISDOpcodes.h:608
SDValue getValue(unsigned R) const
unsigned MaxStoresPerMemcpy
Specify maximum bytes of store instructions per memcpy call.
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:309
SDValue LowerLoad(SDValue Op, SelectionDAG &DAG) const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin...
Definition: ISDOpcodes.h:102
const GlobalObject * getBaseObject() const
Definition: Globals.cpp:261
bool isRegLoc() const
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
Definition: SelectionDAG.h:857
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
Definition: ISDOpcodes.h:345
MachineConstantPoolValue * getMachineCPVal() const
SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const
bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, unsigned Alignment=1, bool *Fast=nullptr) const
Return true if the target supports a memory access of this type for the given address space and align...
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
void dumpr() const
Dump (recursively) this node and its use-def subgraph.
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
#define Hexagon_PointerSize
void setReturnAddressIsTaken(bool s)
static cl::opt< int > MaxStoresPerMemcpyCL("max-store-memcpy", cl::Hidden, cl::ZeroOrMore, cl::init(6), cl::desc("Max #stores to inline memcpy"))
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:483
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
aarch64 promote const
unsigned getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition: Type.cpp:115
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:566
LLVM Value Representation.
Definition: Value.h:73
FMA - Perform a * b + c with no intermediate rounding step.
Definition: ISDOpcodes.h:302
SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a logical NOT operation as (XOR Val, BooleanOne).
SDValue getRegister(unsigned Reg, EVT VT)
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
unsigned getSizeInBits(unsigned Reg, const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI) const
Get the size in bits of Reg.
uint64_t getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type...
Definition: DataLayout.h:419
FunctionType * getFunctionType() const
Definition: CallSite.h:320
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Definition: Operator.h:41
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E&#39;s largest value.
Definition: BitmaskEnum.h:81
PREFETCH - This corresponds to a prefetch intrinsic.
Definition: ISDOpcodes.h:776
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition: Function.h:331
SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone...
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:59
uint64_t getTypeAllocSizeInBits(Type *Ty) const
Returns the offset in bits between successive objects of the specified type, including alignment padd...
Definition: DataLayout.h:446
ConstantInt * getInt8(uint8_t C)
Get a constant 8-bit value.
Definition: IRBuilder.h:297
static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:49
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
LowerCall - Functions arguments are copied from virtual regs to (physical regs)/(stack frame)...
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:443
APInt bitcastToAPInt() const
Definition: APFloat.h:1094
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
unsigned MaxStoresPerMemsetOptSize
Maximum number of stores operations that may be substituted for the call to memset, used for functions with OptSize attribute.
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
unsigned getNumOperands() const
Conversion operators.
Definition: ISDOpcodes.h:465
const SDValue & getOperand(unsigned i) const
unsigned getLocReg() const
uint64_t getZExtValue() const
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:474
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:126
bool IsEligibleForTailCallOptimization(SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg, bool isCalleeStructRet, bool isCallerStructRet, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SmallVectorImpl< ISD::InputArg > &Ins, SelectionDAG &DAG) const
IsEligibleForTailCallOptimization - Check whether the call is eligible for tail call optimization...
static cl::opt< int > MaxStoresPerMemmoveCL("max-store-memmove", cl::Hidden, cl::ZeroOrMore, cl::init(6), cl::desc("Max #stores to inline memmove"))
#define LLVM_DEBUG(X)
Definition: Debug.h:123
static cl::opt< int > MaxStoresPerMemsetOptSizeCL("max-store-memset-Os", cl::Hidden, cl::ZeroOrMore, cl::init(4), cl::desc("Max #stores to inline memset"))
unsigned AllocateReg(unsigned Reg)
AllocateReg - Attempt to allocate one register.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
Perform various unary floating-point operations inspired by libm.
Definition: ISDOpcodes.h:584
static cl::opt< bool > EnableFastMath("ffast-math", cl::Hidden, cl::ZeroOrMore, cl::init(false), cl::desc("Enable Fast Math processing"))
EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc, MachineFunction &MF) const override
Returns the target specific optimal type for load and store operations as a result of memset...
Function Alias Analysis false
static Constant * convert_i1_to_i8(const Constant *ConstVal)
SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const
LLVMContext * getContext() const
Definition: SelectionDAG.h:407
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
Definition: ISDOpcodes.h:375
static Constant * get(ArrayRef< Constant *> V)
Definition: Constants.cpp:1079
SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) const
#define T1
unsigned createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
Carry-using nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:242
const BasicBlock * getParent() const
Definition: Instruction.h:67
HexagonTargetLowering(const TargetMachine &TM, const HexagonSubtarget &ST)
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned char TargetFlags=0)
Definition: SelectionDAG.h:622
void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
Definition: ISDOpcodes.h:914
This class is used to represent ISD::LOAD nodes.
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary...
Definition: ISDOpcodes.h:623