1 //===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the XCoreTargetLowering class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "XCoreISelLowering.h"
15 #include "XCore.h"
16 #include "XCoreMachineFunctionInfo.h"
17 #include "XCoreSubtarget.h"
18 #include "XCoreTargetMachine.h"
19 #include "XCoreTargetObjectFile.h"
20 #include "llvm/CodeGen/CallingConvLower.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/MachineFunction.h"
23 #include "llvm/CodeGen/MachineInstrBuilder.h"
24 #include "llvm/CodeGen/MachineJumpTableInfo.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/SelectionDAGISel.h"
27 #include "llvm/CodeGen/ValueTypes.h"
28 #include "llvm/IR/CallingConv.h"
29 #include "llvm/IR/Constants.h"
30 #include "llvm/IR/DerivedTypes.h"
31 #include "llvm/IR/Function.h"
32 #include "llvm/IR/GlobalAlias.h"
33 #include "llvm/IR/GlobalVariable.h"
34 #include "llvm/IR/Intrinsics.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/ErrorHandling.h"
37 #include "llvm/Support/KnownBits.h"
38 #include "llvm/Support/raw_ostream.h"
39 #include <algorithm>
40 
41 using namespace llvm;
42 
43 #define DEBUG_TYPE "xcore-lower"
44 
45 const char *XCoreTargetLowering::
46 getTargetNodeName(unsigned Opcode) const
47 {
48  switch ((XCoreISD::NodeType)Opcode)
49  {
50  case XCoreISD::FIRST_NUMBER : break;
51  case XCoreISD::BL : return "XCoreISD::BL";
52  case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
53  case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper";
54  case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper";
55  case XCoreISD::LDWSP : return "XCoreISD::LDWSP";
56  case XCoreISD::STWSP : return "XCoreISD::STWSP";
57  case XCoreISD::RETSP : return "XCoreISD::RETSP";
58  case XCoreISD::LADD : return "XCoreISD::LADD";
59  case XCoreISD::LSUB : return "XCoreISD::LSUB";
60  case XCoreISD::LMUL : return "XCoreISD::LMUL";
61  case XCoreISD::MACCU : return "XCoreISD::MACCU";
62  case XCoreISD::MACCS : return "XCoreISD::MACCS";
63  case XCoreISD::CRC8 : return "XCoreISD::CRC8";
64  case XCoreISD::BR_JT : return "XCoreISD::BR_JT";
65  case XCoreISD::BR_JT32 : return "XCoreISD::BR_JT32";
66  case XCoreISD::FRAME_TO_ARGS_OFFSET : return "XCoreISD::FRAME_TO_ARGS_OFFSET";
67  case XCoreISD::EH_RETURN : return "XCoreISD::EH_RETURN";
68  case XCoreISD::MEMBARRIER : return "XCoreISD::MEMBARRIER";
69  }
70  return nullptr;
71 }
72 
73 XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
74                                          const XCoreSubtarget &Subtarget)
75  : TargetLowering(TM), TM(TM), Subtarget(Subtarget) {
76 
77  // Set up the register classes.
78  addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);
79 
80  // Compute derived properties from the register classes
81  computeRegisterProperties(Subtarget.getRegisterInfo());
82 
83  setStackPointerRegisterToSaveRestore(XCore::SP);
84 
85  setSchedulingPreference(Sched::Source);
86 
87  // Use i32 for setcc operations results (slt, sgt, ...).
88  setBooleanContents(ZeroOrOneBooleanContent);
89  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?
90 
91  // XCore does not have the NodeTypes below.
94 
95  // 64bit
105 
106  // Bit Manipulation
110 
112 
113  // Jump tables.
115 
118 
119  // Conversion of i64 -> double produces constantpool nodes
121 
122  // Loads
123  for (MVT VT : MVT::integer_valuetypes()) {
127 
130  }
131 
132  // Custom expand misaligned loads / stores.
135 
136  // Varargs
141 
142  // Dynamic stack
146 
147  // Exception handling
150 
151  // Atomic operations
152  // We request a fence for ATOMIC_* instructions, to reduce them to Monotonic.
153  // As we are always Sequential Consistent, an ATOMIC_FENCE becomes a no OP.
157 
158  // TRAMPOLINE is custom lowered.
161 
162  // We want to custom lower some of our intrinsics.
164 
168 
169  // We have target-specific dag combine patterns for the following nodes:
174 
177 }
178 
179 bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
180   if (Val.getOpcode() != ISD::LOAD)
181  return false;
182 
183  EVT VT1 = Val.getValueType();
184  if (!VT1.isSimple() || !VT1.isInteger() ||
185  !VT2.isSimple() || !VT2.isInteger())
186  return false;
187 
188  switch (VT1.getSimpleVT().SimpleTy) {
189  default: break;
190  case MVT::i8:
191  return true;
192  }
193 
194  return false;
195 }
196 
197 SDValue XCoreTargetLowering::
198 LowerOperation(SDValue Op, SelectionDAG &DAG) const {
199   switch (Op.getOpcode())
200  {
201  case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
202  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
203  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
204  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
205  case ISD::BR_JT: return LowerBR_JT(Op, DAG);
206  case ISD::LOAD: return LowerLOAD(Op, DAG);
207  case ISD::STORE: return LowerSTORE(Op, DAG);
208  case ISD::VAARG: return LowerVAARG(Op, DAG);
209  case ISD::VASTART: return LowerVASTART(Op, DAG);
210  case ISD::SMUL_LOHI: return LowerSMUL_LOHI(Op, DAG);
211  case ISD::UMUL_LOHI: return LowerUMUL_LOHI(Op, DAG);
212  // FIXME: Remove these when LegalizeDAGTypes lands.
213  case ISD::ADD:
214  case ISD::SUB: return ExpandADDSUB(Op.getNode(), DAG);
215  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
216  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
217  case ISD::FRAME_TO_ARGS_OFFSET: return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
218  case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
219  case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
220  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
221  case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
222  case ISD::ATOMIC_LOAD: return LowerATOMIC_LOAD(Op, DAG);
223  case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG);
224  default:
225  llvm_unreachable("unimplemented operand");
226  }
227 }
228 
229 /// ReplaceNodeResults - Replace the results of node with an illegal result
230 /// type with new values built out of custom code.
231 void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
232                                              SmallVectorImpl<SDValue> &Results,
233                                              SelectionDAG &DAG) const {
234  switch (N->getOpcode()) {
235  default:
236  llvm_unreachable("Don't know how to custom expand this!");
237  case ISD::ADD:
238  case ISD::SUB:
239  Results.push_back(ExpandADDSUB(N, DAG));
240  return;
241  }
242 }
243 
244 //===----------------------------------------------------------------------===//
245 // Misc Lower Operation implementation
246 //===----------------------------------------------------------------------===//
247 
248 SDValue XCoreTargetLowering::getGlobalAddressWrapper(SDValue GA,
249  const GlobalValue *GV,
250  SelectionDAG &DAG) const {
251  // FIXME there is no actual debug info here
252  SDLoc dl(GA);
253 
254  if (GV->getValueType()->isFunctionTy())
255  return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
256 
257  const auto *GVar = dyn_cast<GlobalVariable>(GV);
258  if ((GV->hasSection() && GV->getSection().startswith(".cp.")) ||
259  (GVar && GVar->isConstant() && GV->hasLocalLinkage()))
260  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
261 
262  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
263 }
264 
265 static bool IsSmallObject(const GlobalValue *GV, const XCoreTargetLowering &XTL) {
266   if (XTL.getTargetMachine().getCodeModel() == CodeModel::Small)
267     return true;
268 
269  Type *ObjType = GV->getValueType();
270  if (!ObjType->isSized())
271  return false;
272 
273  auto &DL = GV->getParent()->getDataLayout();
274  unsigned ObjSize = DL.getTypeAllocSize(ObjType);
275  return ObjSize < CodeModelLargeSize && ObjSize != 0;
276 }
277 
278 SDValue XCoreTargetLowering::
279 LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
280 {
281  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
282  const GlobalValue *GV = GN->getGlobal();
283  SDLoc DL(GN);
284  int64_t Offset = GN->getOffset();
285  if (IsSmallObject(GV, *this)) {
286  // We can only fold positive offsets that are a multiple of the word size.
287  int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
288  SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
289  GA = getGlobalAddressWrapper(GA, GV, DAG);
290  // Handle the rest of the offset.
291  if (Offset != FoldedOffset) {
292  SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, DL, MVT::i32);
293  GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
294  }
295  return GA;
296  } else {
297  // Ideally we would not fold in offset with an index <= 11.
298  Type *Ty = Type::getInt8PtrTy(*DAG.getContext());
299  Constant *GA = ConstantExpr::getBitCast(const_cast<GlobalValue*>(GV), Ty);
300  Ty = Type::getInt32Ty(*DAG.getContext());
301  Constant *Idx = ConstantInt::get(Ty, Offset);
302   Constant *GAI = ConstantExpr::getGetElementPtr(
303       Type::getInt8Ty(*DAG.getContext()), GA, Idx);
304  SDValue CP = DAG.getConstantPool(GAI, MVT::i32);
305     return DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL,
306                        DAG.getEntryNode(), CP, MachinePointerInfo());
307  }
308 }
309 
310 SDValue XCoreTargetLowering::
311 LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
312 {
313  SDLoc DL(Op);
314  auto PtrVT = getPointerTy(DAG.getDataLayout());
315  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
316  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT);
317 
318  return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, PtrVT, Result);
319 }
320 
321 SDValue XCoreTargetLowering::
322 LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
323 {
324  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
325  // FIXME there isn't really debug info here
326  SDLoc dl(CP);
327  EVT PtrVT = Op.getValueType();
328  SDValue Res;
329  if (CP->isMachineConstantPoolEntry()) {
330  Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
331  CP->getAlignment(), CP->getOffset());
332  } else {
333  Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
334  CP->getAlignment(), CP->getOffset());
335  }
336  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
337 }
338 
339 unsigned XCoreTargetLowering::getJumpTableEncoding() const {
340   return MachineJumpTableInfo::EK_Inline;
341 }
342 
343 SDValue XCoreTargetLowering::
344 LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
345 {
346  SDValue Chain = Op.getOperand(0);
347  SDValue Table = Op.getOperand(1);
348  SDValue Index = Op.getOperand(2);
349  SDLoc dl(Op);
350  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
351  unsigned JTI = JT->getIndex();
352   MachineFunction &MF = DAG.getMachineFunction();
353   const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
354  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);
355 
356  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
357  if (NumEntries <= 32) {
358  return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
359  }
360  assert((NumEntries >> 31) == 0);
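  // Larger tables fall through to the BR_JT32 form; the index is doubled
  // (shifted left by one) first, presumably because each entry in the 32-bit
  // table encoding occupies twice the space of an entry in the small form.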
361  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
362  DAG.getConstant(1, dl, MVT::i32));
363  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
364  ScaledIndex);
365 }
366 
367 SDValue XCoreTargetLowering::lowerLoadWordFromAlignedBasePlusOffset(
368  const SDLoc &DL, SDValue Chain, SDValue Base, int64_t Offset,
369  SelectionDAG &DAG) const {
370  auto PtrVT = getPointerTy(DAG.getDataLayout());
371  if ((Offset & 0x3) == 0) {
372  return DAG.getLoad(PtrVT, DL, Chain, Base, MachinePointerInfo());
373  }
374  // Lower to pair of consecutive word aligned loads plus some bit shifting.
375  int32_t HighOffset = alignTo(Offset, 4);
376  int32_t LowOffset = HighOffset - 4;
377  SDValue LowAddr, HighAddr;
378  if (GlobalAddressSDNode *GASD =
379  dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
380  LowAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
381  LowOffset);
382  HighAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
383  HighOffset);
384  } else {
385  LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
386  DAG.getConstant(LowOffset, DL, MVT::i32));
387  HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
388  DAG.getConstant(HighOffset, DL, MVT::i32));
389  }
390  SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, DL, MVT::i32);
391  SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, DL, MVT::i32);
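  // Example: for Offset == 2 this gives LowOffset == 0, HighOffset == 4,
  // LowShift == 16 and HighShift == 16, so the result below is
  // (Low >> 16) | (High << 16) -- the two aligned words that straddle the
  // misaligned address are stitched back together.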
392 
393  SDValue Low = DAG.getLoad(PtrVT, DL, Chain, LowAddr, MachinePointerInfo());
394  SDValue High = DAG.getLoad(PtrVT, DL, Chain, HighAddr, MachinePointerInfo());
395  SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
396  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
397  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
398  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
399  High.getValue(1));
400  SDValue Ops[] = { Result, Chain };
401  return DAG.getMergeValues(Ops, DL);
402 }
403 
404 static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
405 {
406  KnownBits Known = DAG.computeKnownBits(Value);
407  return Known.countMinTrailingZeros() >= 2;
408 }
409 
410 SDValue XCoreTargetLowering::
411 LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
412  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
413  LoadSDNode *LD = cast<LoadSDNode>(Op);
414   assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
415          "Unexpected extension type");
416  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
417   if (allowsMisalignedMemoryAccesses(LD->getMemoryVT(),
418                                      LD->getAddressSpace(),
419  LD->getAlignment()))
420  return SDValue();
421 
422  auto &TD = DAG.getDataLayout();
423  unsigned ABIAlignment = TD.getABITypeAlignment(
424  LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
425  // Leave aligned load alone.
426  if (LD->getAlignment() >= ABIAlignment)
427  return SDValue();
428 
429  SDValue Chain = LD->getChain();
430  SDValue BasePtr = LD->getBasePtr();
431  SDLoc DL(Op);
432 
433  if (!LD->isVolatile()) {
434  const GlobalValue *GV;
435  int64_t Offset = 0;
436  if (DAG.isBaseWithConstantOffset(BasePtr) &&
437  isWordAligned(BasePtr->getOperand(0), DAG)) {
438  SDValue NewBasePtr = BasePtr->getOperand(0);
439  Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
440  return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
441  Offset, DAG);
442  }
443  if (TLI.isGAPlusOffset(BasePtr.getNode(), GV, Offset) &&
444  MinAlign(GV->getAlignment(), 4) == 4) {
445  SDValue NewBasePtr = DAG.getGlobalAddress(GV, DL,
446  BasePtr->getValueType(0));
447  return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
448  Offset, DAG);
449  }
450  }
451 
452  if (LD->getAlignment() == 2) {
453  SDValue Low =
454  DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain, BasePtr,
455  LD->getPointerInfo(), MVT::i16,
456  /* Alignment = */ 2, LD->getMemOperand()->getFlags());
457  SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
458  DAG.getConstant(2, DL, MVT::i32));
459  SDValue High =
460  DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, HighAddr,
461                        LD->getPointerInfo().getWithOffset(2), MVT::i16,
462                        /* Alignment = */ 2, LD->getMemOperand()->getFlags());
463  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
464  DAG.getConstant(16, DL, MVT::i32));
465  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
466  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
467  High.getValue(1));
468  SDValue Ops[] = { Result, Chain };
469  return DAG.getMergeValues(Ops, DL);
470  }
471 
472  // Lower to a call to __misaligned_load(BasePtr).
473  Type *IntPtrTy = TD.getIntPtrType(*DAG.getContext());
474   TargetLowering::ArgListTy Args;
475   TargetLowering::ArgListEntry Entry;
476 
477  Entry.Ty = IntPtrTy;
478  Entry.Node = BasePtr;
479  Args.push_back(Entry);
480 
481   TargetLowering::CallLoweringInfo CLI(DAG);
482   CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
483  CallingConv::C, IntPtrTy,
484  DAG.getExternalSymbol("__misaligned_load",
485  getPointerTy(DAG.getDataLayout())),
486  std::move(Args));
487 
488  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
489  SDValue Ops[] = { CallResult.first, CallResult.second };
490  return DAG.getMergeValues(Ops, DL);
491 }
492 
493 SDValue XCoreTargetLowering::
494 LowerSTORE(SDValue Op, SelectionDAG &DAG) const
495 {
496  StoreSDNode *ST = cast<StoreSDNode>(Op);
497  assert(!ST->isTruncatingStore() && "Unexpected store type");
498  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
499   if (allowsMisalignedMemoryAccesses(ST->getMemoryVT(),
500                                      ST->getAddressSpace(),
501  ST->getAlignment())) {
502  return SDValue();
503  }
504  unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(
505  ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
506  // Leave aligned store alone.
507  if (ST->getAlignment() >= ABIAlignment) {
508  return SDValue();
509  }
510  SDValue Chain = ST->getChain();
511  SDValue BasePtr = ST->getBasePtr();
512  SDValue Value = ST->getValue();
513  SDLoc dl(Op);
514 
515  if (ST->getAlignment() == 2) {
516  SDValue Low = Value;
517  SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
518  DAG.getConstant(16, dl, MVT::i32));
519  SDValue StoreLow = DAG.getTruncStore(
520  Chain, dl, Low, BasePtr, ST->getPointerInfo(), MVT::i16,
521  /* Alignment = */ 2, ST->getMemOperand()->getFlags());
522  SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
523  DAG.getConstant(2, dl, MVT::i32));
524  SDValue StoreHigh = DAG.getTruncStore(
525  Chain, dl, High, HighAddr, ST->getPointerInfo().getWithOffset(2),
526  MVT::i16, /* Alignment = */ 2, ST->getMemOperand()->getFlags());
527  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
528  }
529 
530  // Lower to a call to __misaligned_store(BasePtr, Value).
531  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
532   TargetLowering::ArgListTy Args;
533   TargetLowering::ArgListEntry Entry;
534 
535  Entry.Ty = IntPtrTy;
536  Entry.Node = BasePtr;
537  Args.push_back(Entry);
538 
539  Entry.Node = Value;
540  Args.push_back(Entry);
541 
542   TargetLowering::CallLoweringInfo CLI(DAG);
543   CLI.setDebugLoc(dl).setChain(Chain).setCallee(
544       CallingConv::C, Type::getVoidTy(*DAG.getContext()),
545       DAG.getExternalSymbol("__misaligned_store",
546  getPointerTy(DAG.getDataLayout())),
547  std::move(Args));
548 
549  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
550  return CallResult.second;
551 }
552 
553 SDValue XCoreTargetLowering::
554 LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
555 {
556  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
557  "Unexpected operand to lower!");
558  SDLoc dl(Op);
559  SDValue LHS = Op.getOperand(0);
560  SDValue RHS = Op.getOperand(1);
561  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
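  // MACCS appears to compute AccHi:AccLo + sext(a) * sext(b) as a 64-bit value
  // split across its two i32 results; with both accumulator inputs zero this
  // is just the full signed product, which is exactly what SMUL_LOHI needs.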
562  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
563  DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
564  LHS, RHS);
565  SDValue Lo(Hi.getNode(), 1);
566  SDValue Ops[] = { Lo, Hi };
567  return DAG.getMergeValues(Ops, dl);
568 }
569 
570 SDValue XCoreTargetLowering::
571 LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
572 {
573  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
574  "Unexpected operand to lower!");
575  SDLoc dl(Op);
576  SDValue LHS = Op.getOperand(0);
577  SDValue RHS = Op.getOperand(1);
578  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
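  // LMUL appears to form zext(a) * zext(b) plus its two extra i32 addends as a
  // 64-bit value split across two i32 results; with both addends zero this is
  // the full unsigned product required by UMUL_LOHI.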
579  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
580  DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
581  Zero, Zero);
582  SDValue Lo(Hi.getNode(), 1);
583  SDValue Ops[] = { Lo, Hi };
584  return DAG.getMergeValues(Ops, dl);
585 }
586 
587 /// isADDADDMUL - Return whether Op is in a form that is equivalent to
588 /// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
589 /// each intermediate result in the calculation must also have a single use.
590 /// If the Op is in the correct form the constituent parts are written to Mul0,
591 /// Mul1, Addend0 and Addend1.
592 static bool
593 isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
594  SDValue &Addend1, bool requireIntermediatesHaveOneUse)
595 {
596  if (Op.getOpcode() != ISD::ADD)
597  return false;
598  SDValue N0 = Op.getOperand(0);
599  SDValue N1 = Op.getOperand(1);
600  SDValue AddOp;
601  SDValue OtherOp;
602  if (N0.getOpcode() == ISD::ADD) {
603  AddOp = N0;
604  OtherOp = N1;
605  } else if (N1.getOpcode() == ISD::ADD) {
606  AddOp = N1;
607  OtherOp = N0;
608  } else {
609  return false;
610  }
611  if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
612  return false;
613  if (OtherOp.getOpcode() == ISD::MUL) {
614  // add(add(a,b),mul(x,y))
615  if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
616  return false;
617  Mul0 = OtherOp.getOperand(0);
618  Mul1 = OtherOp.getOperand(1);
619  Addend0 = AddOp.getOperand(0);
620  Addend1 = AddOp.getOperand(1);
621  return true;
622  }
623  if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
624  // add(add(mul(x,y),a),b)
625  if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
626  return false;
627  Mul0 = AddOp.getOperand(0).getOperand(0);
628  Mul1 = AddOp.getOperand(0).getOperand(1);
629  Addend0 = AddOp.getOperand(1);
630  Addend1 = OtherOp;
631  return true;
632  }
633  if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
634  // add(add(a,mul(x,y)),b)
635  if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
636  return false;
637  Mul0 = AddOp.getOperand(1).getOperand(0);
638  Mul1 = AddOp.getOperand(1).getOperand(1);
639  Addend0 = AddOp.getOperand(0);
640  Addend1 = OtherOp;
641  return true;
642  }
643  return false;
644 }
645 
646 SDValue XCoreTargetLowering::
647 TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
648 {
649  SDValue Mul;
650  SDValue Other;
651  if (N->getOperand(0).getOpcode() == ISD::MUL) {
652  Mul = N->getOperand(0);
653  Other = N->getOperand(1);
654  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
655  Mul = N->getOperand(1);
656  Other = N->getOperand(0);
657  } else {
658  return SDValue();
659  }
660  SDLoc dl(N);
661  SDValue LL, RL, AddendL, AddendH;
662  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
663  Mul.getOperand(0), DAG.getConstant(0, dl, MVT::i32));
664  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
665  Mul.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
666  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
667  Other, DAG.getConstant(0, dl, MVT::i32));
668  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
669  Other, DAG.getConstant(1, dl, MVT::i32));
670  APInt HighMask = APInt::getHighBitsSet(64, 32);
671  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
672  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
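  // If both multiplicands are known to fit in the low 32 bits (high halves
  // provably zero, or more than 32 sign bits), a single MACCU/MACCS of the low
  // halves already produces the exact 64-bit product, handled below.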
673  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
674  DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
675  // The inputs are both zero-extended.
676  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
677  DAG.getVTList(MVT::i32, MVT::i32), AddendH,
678  AddendL, LL, RL);
679  SDValue Lo(Hi.getNode(), 1);
680  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
681  }
682  if (LHSSB > 32 && RHSSB > 32) {
683  // The inputs are both sign-extended.
684  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
685  DAG.getVTList(MVT::i32, MVT::i32), AddendH,
686  AddendL, LL, RL);
687  SDValue Lo(Hi.getNode(), 1);
688  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
689  }
690  SDValue LH, RH;
691  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
692  Mul.getOperand(0), DAG.getConstant(1, dl, MVT::i32));
693  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
694  Mul.getOperand(1), DAG.getConstant(1, dl, MVT::i32));
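  // General case: only LL * RL goes through MACCU. The cross products LL * RH
  // and LH * RL are implicitly shifted left by 32 bits, so they can only
  // affect the high word and are added to Hi below; the LH * RH term would be
  // shifted by 64 bits and drops out entirely.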
695  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
696  DAG.getVTList(MVT::i32, MVT::i32), AddendH,
697  AddendL, LL, RL);
698  SDValue Lo(Hi.getNode(), 1);
699  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
700  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
701  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
702  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
703  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
704 }
705 
706 SDValue XCoreTargetLowering::
707 ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
708 {
709  assert(N->getValueType(0) == MVT::i64 &&
710  (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
711  "Unknown operand to lower!");
712 
713  if (N->getOpcode() == ISD::ADD)
714  if (SDValue Result = TryExpandADDWithMul(N, DAG))
715  return Result;
716 
717  SDLoc dl(N);
718 
719  // Extract components
720   SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
721                              N->getOperand(0),
722                              DAG.getConstant(0, dl, MVT::i32));
723   SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
724                              N->getOperand(0),
725                              DAG.getConstant(1, dl, MVT::i32));
726   SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
727                              N->getOperand(1),
728                              DAG.getConstant(0, dl, MVT::i32));
729   SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
730                              N->getOperand(1),
731                              DAG.getConstant(1, dl, MVT::i32));
732 
733  // Expand
734   unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
735                                                    XCoreISD::LSUB;
736  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
737  SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
738  LHSL, RHSL, Zero);
739  SDValue Carry(Lo.getNode(), 1);
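  // LADD/LSUB produce the 32-bit sum/difference plus a carry/borrow as their
  // second result; the low words are combined first and that carry is then
  // threaded into the high-word operation.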
740 
741  SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
742  LHSH, RHSH, Carry);
743  SDValue Ignored(Hi.getNode(), 1);
744  // Merge the pieces
745  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
746 }
747 
748 SDValue XCoreTargetLowering::
749 LowerVAARG(SDValue Op, SelectionDAG &DAG) const
750 {
751   // Whilst llvm does not support aggregate varargs we can ignore
752  // the possibility of the ValueType being an implicit byVal vararg.
753  SDNode *Node = Op.getNode();
754  EVT VT = Node->getValueType(0); // not an aggregate
755  SDValue InChain = Node->getOperand(0);
756  SDValue VAListPtr = Node->getOperand(1);
757  EVT PtrVT = VAListPtr.getValueType();
758  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
759  SDLoc dl(Node);
760  SDValue VAList =
761  DAG.getLoad(PtrVT, dl, InChain, VAListPtr, MachinePointerInfo(SV));
762  // Increment the pointer, VAList, to the next vararg
763  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
764  DAG.getIntPtrConstant(VT.getSizeInBits() / 8,
765  dl));
766  // Store the incremented VAList to the legalized pointer
767  InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
768  MachinePointerInfo(SV));
769  // Load the actual argument out of the pointer VAList
770  return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo());
771 }
772 
773 SDValue XCoreTargetLowering::
774 LowerVASTART(SDValue Op, SelectionDAG &DAG) const
775 {
776  SDLoc dl(Op);
777  // vastart stores the address of the VarArgsFrameIndex slot into the
778  // memory location argument
779   MachineFunction &MF = DAG.getMachineFunction();
780   XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
781   SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
782   return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
783                       MachinePointerInfo());
784 }
785 
786 SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
787  SelectionDAG &DAG) const {
788   // This node represents llvm.frameaddress on the DAG.
789  // It takes one operand, the index of the frame address to return.
790  // An index of zero corresponds to the current function's frame address.
791  // An index of one to the parent's frame address, and so on.
792  // Depths > 0 not supported yet!
793  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
794  return SDValue();
795 
796   MachineFunction &MF = DAG.getMachineFunction();
797   const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
798  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op),
799  RegInfo->getFrameRegister(MF), MVT::i32);
800 }
801 
802 SDValue XCoreTargetLowering::
803 LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
804   // This node represents llvm.returnaddress on the DAG.
805  // It takes one operand, the index of the return address to return.
806  // An index of zero corresponds to the current function's return address.
807  // An index of one to the parent's return address, and so on.
808  // Depths > 0 not supported yet!
809  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
810  return SDValue();
811 
812   MachineFunction &MF = DAG.getMachineFunction();
813   XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
814   int FI = XFI->createLRSpillSlot(MF);
815  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
816  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
817                      DAG.getEntryNode(), FIN,
818                      MachinePointerInfo::getFixedStack(MF, FI));
819 }
820 
821 SDValue XCoreTargetLowering::
822 LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const {
823  // This node represents offset from frame pointer to first on-stack argument.
824  // This is needed for correct stack adjustment during unwind.
825   // However, we don't know the offset until after the frame has been finalised.
826  // This is done during the XCoreFTAOElim pass.
827   return DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, SDLoc(Op), MVT::i32);
828 }
829 
830 SDValue XCoreTargetLowering::
831 LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
832  // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER)
833  // This node represents 'eh_return' gcc dwarf builtin, which is used to
834  // return from exception. The general meaning is: adjust stack by OFFSET and
835  // pass execution to HANDLER.
836   MachineFunction &MF = DAG.getMachineFunction();
837   SDValue Chain = Op.getOperand(0);
838  SDValue Offset = Op.getOperand(1);
839  SDValue Handler = Op.getOperand(2);
840  SDLoc dl(Op);
841 
842  // Absolute SP = (FP + FrameToArgs) + Offset
843  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
844  SDValue Stack = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
845  RegInfo->getFrameRegister(MF), MVT::i32);
846  SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,
847  MVT::i32);
848  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, FrameToArgs);
849  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, Offset);
850 
851  // R0=ExceptionPointerRegister R1=ExceptionSelectorRegister
852  // which leaves 2 caller saved registers, R2 & R3 for us to use.
853  unsigned StackReg = XCore::R2;
854  unsigned HandlerReg = XCore::R3;
855 
856  SDValue OutChains[] = {
857  DAG.getCopyToReg(Chain, dl, StackReg, Stack),
858  DAG.getCopyToReg(Chain, dl, HandlerReg, Handler)
859  };
860 
861  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
862 
863  return DAG.getNode(XCoreISD::EH_RETURN, dl, MVT::Other, Chain,
864  DAG.getRegister(StackReg, MVT::i32),
865  DAG.getRegister(HandlerReg, MVT::i32));
866 
867 }
868 
869 SDValue XCoreTargetLowering::
870 LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
871  return Op.getOperand(0);
872 }
873 
874 SDValue XCoreTargetLowering::
875 LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
876  SDValue Chain = Op.getOperand(0);
877  SDValue Trmp = Op.getOperand(1); // trampoline
878  SDValue FPtr = Op.getOperand(2); // nested function
879  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
880 
881  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
882 
883  // .align 4
884  // LDAPF_u10 r11, nest
885  // LDW_2rus r11, r11[0]
886  // STWSP_ru6 r11, sp[0]
887  // LDAPF_u10 r11, fptr
888  // LDW_2rus r11, r11[0]
889  // BAU_1r r11
890  // nest:
891  // .word nest
892  // fptr:
893  // .word fptr
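  // The first three word stores below lay down what appear to be the encodings
  // of the six-instruction thunk listed above; the last two store the nest
  // value and the target function pointer that the thunk loads pc-relative.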
894  SDValue OutChains[5];
895 
896  SDValue Addr = Trmp;
897 
898  SDLoc dl(Op);
899  OutChains[0] =
900  DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, dl, MVT::i32), Addr,
901  MachinePointerInfo(TrmpAddr));
902 
903  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
904  DAG.getConstant(4, dl, MVT::i32));
905  OutChains[1] =
906  DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, dl, MVT::i32), Addr,
907  MachinePointerInfo(TrmpAddr, 4));
908 
909  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
910  DAG.getConstant(8, dl, MVT::i32));
911  OutChains[2] =
912  DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, dl, MVT::i32), Addr,
913  MachinePointerInfo(TrmpAddr, 8));
914 
915  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
916  DAG.getConstant(12, dl, MVT::i32));
917  OutChains[3] =
918  DAG.getStore(Chain, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12));
919 
920  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
921  DAG.getConstant(16, dl, MVT::i32));
922  OutChains[4] =
923  DAG.getStore(Chain, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 16));
924 
925  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
926 }
927 
928 SDValue XCoreTargetLowering::
929 LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
930  SDLoc DL(Op);
931  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
932  switch (IntNo) {
933   case Intrinsic::xcore_crc8:
934     EVT VT = Op.getValueType();
935  SDValue Data =
936  DAG.getNode(XCoreISD::CRC8, DL, DAG.getVTList(VT, VT),
937  Op.getOperand(1), Op.getOperand(2) , Op.getOperand(3));
938  SDValue Crc(Data.getNode(), 1);
939  SDValue Results[] = { Crc, Data };
940  return DAG.getMergeValues(Results, DL);
941  }
942  return SDValue();
943 }
944 
945 SDValue XCoreTargetLowering::
946 LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
947  SDLoc DL(Op);
948  return DAG.getNode(XCoreISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
949 }
950 
951 SDValue XCoreTargetLowering::
952 LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
953  AtomicSDNode *N = cast<AtomicSDNode>(Op);
954  assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
955   assert((N->getOrdering() == AtomicOrdering::Unordered ||
956           N->getOrdering() == AtomicOrdering::Monotonic) &&
957          "setInsertFencesForAtomic(true) expects unordered / monotonic");
958  if (N->getMemoryVT() == MVT::i32) {
959  if (N->getAlignment() < 4)
960  report_fatal_error("atomic load must be aligned");
961  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
962  N->getChain(), N->getBasePtr(), N->getPointerInfo(),
963  N->getAlignment(), N->getMemOperand()->getFlags(),
964  N->getAAInfo(), N->getRanges());
965  }
966  if (N->getMemoryVT() == MVT::i16) {
967  if (N->getAlignment() < 2)
968  report_fatal_error("atomic load must be aligned");
969  return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
970  N->getBasePtr(), N->getPointerInfo(), MVT::i16,
971  N->getAlignment(), N->getMemOperand()->getFlags(),
972  N->getAAInfo());
973  }
974  if (N->getMemoryVT() == MVT::i8)
975  return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
976  N->getBasePtr(), N->getPointerInfo(), MVT::i8,
977  N->getAlignment(), N->getMemOperand()->getFlags(),
978  N->getAAInfo());
979  return SDValue();
980 }
981 
982 SDValue XCoreTargetLowering::
983 LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
984  AtomicSDNode *N = cast<AtomicSDNode>(Op);
985  assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
986   assert((N->getOrdering() == AtomicOrdering::Unordered ||
987           N->getOrdering() == AtomicOrdering::Monotonic) &&
988          "setInsertFencesForAtomic(true) expects unordered / monotonic");
989  if (N->getMemoryVT() == MVT::i32) {
990  if (N->getAlignment() < 4)
991  report_fatal_error("atomic store must be aligned");
992  return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(), N->getBasePtr(),
993  N->getPointerInfo(), N->getAlignment(),
994  N->getMemOperand()->getFlags(), N->getAAInfo());
995  }
996  if (N->getMemoryVT() == MVT::i16) {
997  if (N->getAlignment() < 2)
998  report_fatal_error("atomic store must be aligned");
999  return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
1000  N->getBasePtr(), N->getPointerInfo(), MVT::i16,
1001  N->getAlignment(), N->getMemOperand()->getFlags(),
1002  N->getAAInfo());
1003  }
1004  if (N->getMemoryVT() == MVT::i8)
1005  return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
1006  N->getBasePtr(), N->getPointerInfo(), MVT::i8,
1007  N->getAlignment(), N->getMemOperand()->getFlags(),
1008  N->getAAInfo());
1009  return SDValue();
1010 }
1011 
1012 //===----------------------------------------------------------------------===//
1013 // Calling Convention Implementation
1014 //===----------------------------------------------------------------------===//
1015 
1016 #include "XCoreGenCallingConv.inc"
1017 
1018 //===----------------------------------------------------------------------===//
1019 // Call Calling Convention Implementation
1020 //===----------------------------------------------------------------------===//
1021 
1022 /// XCore call implementation
1023 SDValue
1024 XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
1025  SmallVectorImpl<SDValue> &InVals) const {
1026  SelectionDAG &DAG = CLI.DAG;
1027  SDLoc &dl = CLI.DL;
1028   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1029   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1030   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1031  SDValue Chain = CLI.Chain;
1032  SDValue Callee = CLI.Callee;
1033  bool &isTailCall = CLI.IsTailCall;
1034  CallingConv::ID CallConv = CLI.CallConv;
1035  bool isVarArg = CLI.IsVarArg;
1036 
1037  // XCore target does not yet support tail call optimization.
1038  isTailCall = false;
1039 
1040  // For now, only CallingConv::C implemented
1041  switch (CallConv)
1042  {
1043  default:
1044  report_fatal_error("Unsupported calling convention");
1045  case CallingConv::Fast:
1046  case CallingConv::C:
1047  return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
1048  Outs, OutVals, Ins, dl, DAG, InVals);
1049  }
1050 }
1051 
1052 /// LowerCallResult - Lower the result values of a call into the
1053 /// appropriate copies out of appropriate physical registers / memory locations.
1054 static SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
1055                                const SmallVectorImpl<CCValAssign> &RVLocs,
1056  const SDLoc &dl, SelectionDAG &DAG,
1057  SmallVectorImpl<SDValue> &InVals) {
1058  SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
1059  // Copy results out of physical registers.
1060  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1061  const CCValAssign &VA = RVLocs[i];
1062  if (VA.isRegLoc()) {
1063  Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(),
1064  InFlag).getValue(1);
1065  InFlag = Chain.getValue(2);
1066  InVals.push_back(Chain.getValue(0));
1067  } else {
1068  assert(VA.isMemLoc());
1069  ResultMemLocs.push_back(std::make_pair(VA.getLocMemOffset(),
1070  InVals.size()));
1071  // Reserve space for this result.
1072  InVals.push_back(SDValue());
1073  }
1074  }
1075 
1076  // Copy results out of memory.
1077  SmallVector<SDValue, 4> MemOpChains;
1078  for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
1079  int offset = ResultMemLocs[i].first;
1080  unsigned index = ResultMemLocs[i].second;
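    // The calling convention records byte offsets, while XCoreISD::LDWSP (like
    // the sp-relative ldw instruction it presumably becomes) takes a word
    // offset, hence the division by 4.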
1081  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
1082  SDValue Ops[] = { Chain, DAG.getConstant(offset / 4, dl, MVT::i32) };
1083  SDValue load = DAG.getNode(XCoreISD::LDWSP, dl, VTs, Ops);
1084  InVals[index] = load;
1085  MemOpChains.push_back(load.getValue(1));
1086  }
1087 
1088  // Transform all loads nodes into one single node because
1089  // all load nodes are independent of each other.
1090  if (!MemOpChains.empty())
1091  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1092 
1093  return Chain;
1094 }
1095 
1096 /// LowerCCCCallTo - functions arguments are copied from virtual
1097 /// regs to (physical regs)/(stack frame), CALLSEQ_START and
1098 /// CALLSEQ_END are emitted.
1099 /// TODO: isTailCall, sret.
1100 SDValue XCoreTargetLowering::LowerCCCCallTo(
1101  SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
1102  bool isTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs,
1103  const SmallVectorImpl<SDValue> &OutVals,
1104  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1105  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1106 
1107  // Analyze operands of the call, assigning locations to each operand.
1108   SmallVector<CCValAssign, 16> ArgLocs;
1109   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1110  *DAG.getContext());
1111 
1112  // The ABI dictates there should be one stack slot available to the callee
1113  // on function entry (for saving lr).
1114  CCInfo.AllocateStack(4, 4);
1115 
1116  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);
1117 
1118   SmallVector<CCValAssign, 16> RVLocs;
1119   // Analyze return values to determine the number of bytes of stack required.
1120  CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1121  *DAG.getContext());
1122  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), 4);
1123  RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);
1124 
1125  // Get a count of how many bytes are to be pushed on the stack.
1126  unsigned NumBytes = RetCCInfo.getNextStackOffset();
1127  auto PtrVT = getPointerTy(DAG.getDataLayout());
1128 
1129  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
1130 
1131   SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
1132   SmallVector<SDValue, 12> MemOpChains;
1133 
1134  // Walk the register/memloc assignments, inserting copies/loads.
1135  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1136  CCValAssign &VA = ArgLocs[i];
1137  SDValue Arg = OutVals[i];
1138 
1139  // Promote the value if needed.
1140  switch (VA.getLocInfo()) {
1141  default: llvm_unreachable("Unknown loc info!");
1142  case CCValAssign::Full: break;
1143  case CCValAssign::SExt:
1144  Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
1145  break;
1146  case CCValAssign::ZExt:
1147  Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
1148  break;
1149  case CCValAssign::AExt:
1150  Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
1151  break;
1152  }
1153 
1154  // Arguments that can be passed on register must be kept at
1155  // RegsToPass vector
1156  if (VA.isRegLoc()) {
1157  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1158  } else {
1159  assert(VA.isMemLoc());
1160 
1161  int Offset = VA.getLocMemOffset();
1162 
1163  MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
1164  Chain, Arg,
1165  DAG.getConstant(Offset/4, dl,
1166  MVT::i32)));
1167  }
1168  }
1169 
1170  // Transform all store nodes into one single node because
1171  // all store nodes are independent of each other.
1172  if (!MemOpChains.empty())
1173  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1174 
1175  // Build a sequence of copy-to-reg nodes chained together with token
1176  // chain and flag operands which copy the outgoing args into registers.
1177   // The InFlag is necessary since all emitted instructions must be
1178  // stuck together.
1179  SDValue InFlag;
1180  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1181  Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1182  RegsToPass[i].second, InFlag);
1183  InFlag = Chain.getValue(1);
1184  }
1185 
1186  // If the callee is a GlobalAddress node (quite common, every direct call is)
1187  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1188  // Likewise ExternalSymbol -> TargetExternalSymbol.
1189  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1190  Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
1191  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1192  Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
1193 
1194  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
1195  // = Chain, Callee, Reg#1, Reg#2, ...
1196  //
1197  // Returns a chain & a flag for retval copy to use.
1198  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1199   SmallVector<SDValue, 14> Ops;
1200   Ops.push_back(Chain);
1201  Ops.push_back(Callee);
1202 
1203  // Add argument registers to the end of the list so that they are
1204  // known live into the call.
1205  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1206  Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1207  RegsToPass[i].second.getValueType()));
1208 
1209  if (InFlag.getNode())
1210  Ops.push_back(InFlag);
1211 
1212  Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, Ops);
1213  InFlag = Chain.getValue(1);
1214 
1215  // Create the CALLSEQ_END node.
1216  Chain = DAG.getCALLSEQ_END(Chain, DAG.getConstant(NumBytes, dl, PtrVT, true),
1217  DAG.getConstant(0, dl, PtrVT, true), InFlag, dl);
1218  InFlag = Chain.getValue(1);
1219 
1220  // Handle result values, copying them out of physregs into vregs that we
1221  // return.
1222  return LowerCallResult(Chain, InFlag, RVLocs, dl, DAG, InVals);
1223 }
1224 
1225 //===----------------------------------------------------------------------===//
1226 // Formal Arguments Calling Convention Implementation
1227 //===----------------------------------------------------------------------===//
1228 
1229 namespace {
1230  struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; };
1231 }
1232 
1233 /// XCore formal arguments implementation
1234 SDValue XCoreTargetLowering::LowerFormalArguments(
1235  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1236  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1237  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1238  switch (CallConv)
1239  {
1240  default:
1241  report_fatal_error("Unsupported calling convention");
1242  case CallingConv::C:
1243  case CallingConv::Fast:
1244  return LowerCCCArguments(Chain, CallConv, isVarArg,
1245  Ins, dl, DAG, InVals);
1246  }
1247 }
1248 
1249 /// LowerCCCArguments - transform physical registers into
1250 /// virtual registers and generate load operations for
1251 /// arguments placed on the stack.
1252 /// TODO: sret
1253 SDValue XCoreTargetLowering::LowerCCCArguments(
1254  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1255  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1256  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1257  MachineFunction &MF = DAG.getMachineFunction();
1258  MachineFrameInfo &MFI = MF.getFrameInfo();
1259  MachineRegisterInfo &RegInfo = MF.getRegInfo();
1260   XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
1261 
1262  // Assign locations to all of the incoming arguments.
1263   SmallVector<CCValAssign, 16> ArgLocs;
1264   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1265  *DAG.getContext());
1266 
1267  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);
1268 
1269  unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();
1270 
1271  unsigned LRSaveSize = StackSlotSize;
1272 
1273  if (!isVarArg)
1274  XFI->setReturnStackOffset(CCInfo.getNextStackOffset() + LRSaveSize);
1275 
1276  // All getCopyFromReg ops must precede any getMemcpys to prevent the
1277  // scheduler clobbering a register before it has been copied.
1278  // The stages are:
1279  // 1. CopyFromReg (and load) arg & vararg registers.
1280  // 2. Chain CopyFromReg nodes into a TokenFactor.
1281  // 3. Memcpy 'byVal' args & push final InVals.
1282  // 4. Chain mem ops nodes into a TokenFactor.
1283  SmallVector<SDValue, 4> CFRegNode;
1284   SmallVector<ArgDataPair, 4> ArgData;
1285   SmallVector<SDValue, 4> MemOps;
1286 
1287  // 1a. CopyFromReg (and load) arg registers.
1288  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1289 
1290  CCValAssign &VA = ArgLocs[i];
1291  SDValue ArgIn;
1292 
1293  if (VA.isRegLoc()) {
1294  // Arguments passed in registers
1295  EVT RegVT = VA.getLocVT();
1296  switch (RegVT.getSimpleVT().SimpleTy) {
1297  default:
1298  {
1299 #ifndef NDEBUG
1300  errs() << "LowerFormalArguments Unhandled argument type: "
1301  << RegVT.getEVTString() << "\n";
1302 #endif
1303  llvm_unreachable(nullptr);
1304  }
1305  case MVT::i32:
1306  unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
1307  RegInfo.addLiveIn(VA.getLocReg(), VReg);
1308  ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
1309  CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
1310  }
1311  } else {
1312  // sanity check
1313  assert(VA.isMemLoc());
1314  // Load the argument to a virtual register
1315  unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
1316  if (ObjSize > StackSlotSize) {
1317  errs() << "LowerFormalArguments Unhandled argument type: "
1318  << EVT(VA.getLocVT()).getEVTString()
1319  << "\n";
1320  }
1321  // Create the frame index object for this incoming parameter...
1322  int FI = MFI.CreateFixedObject(ObjSize,
1323  LRSaveSize + VA.getLocMemOffset(),
1324  true);
1325 
1326  // Create the SelectionDAG nodes corresponding to a load
1327  //from this parameter
1328  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1329       ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
1330                           MachinePointerInfo::getFixedStack(MF, FI));
1331  }
1332  const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
1333  ArgData.push_back(ADP);
1334  }
1335 
1336  // 1b. CopyFromReg vararg registers.
1337  if (isVarArg) {
1338  // Argument registers
1339  static const MCPhysReg ArgRegs[] = {
1340  XCore::R0, XCore::R1, XCore::R2, XCore::R3
1341  };
1343  unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
1344  if (FirstVAReg < array_lengthof(ArgRegs)) {
1345  int offset = 0;
1346  // Save remaining registers, storing higher register numbers at a higher
1347  // address
1348  for (int i = array_lengthof(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
1349  // Create a stack slot
1350  int FI = MFI.CreateFixedObject(4, offset, true);
1351  if (i == (int)FirstVAReg) {
1352  XFI->setVarArgsFrameIndex(FI);
1353  }
1354  offset -= StackSlotSize;
1355  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1356  // Move argument from phys reg -> virt reg
1357  unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
1358  RegInfo.addLiveIn(ArgRegs[i], VReg);
1359  SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
1360  CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
1361  // Move argument from virt reg -> stack
1362  SDValue Store =
1363  DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
1364  MemOps.push_back(Store);
1365  }
1366  } else {
1367  // This will point to the next argument passed via stack.
1368  XFI->setVarArgsFrameIndex(
1369  MFI.CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
1370  true));
1371  }
1372  }
1373 
1374  // 2. chain CopyFromReg nodes into a TokenFactor.
1375  if (!CFRegNode.empty())
1376  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, CFRegNode);
1377 
1378  // 3. Memcpy 'byVal' args & push final InVals.
1379  // Aggregates passed "byVal" need to be copied by the callee.
1380  // The callee will use a pointer to this copy, rather than the original
1381  // pointer.
1382  for (SmallVectorImpl<ArgDataPair>::const_iterator ArgDI = ArgData.begin(),
1383  ArgDE = ArgData.end();
1384  ArgDI != ArgDE; ++ArgDI) {
1385  if (ArgDI->Flags.isByVal() && ArgDI->Flags.getByValSize()) {
1386  unsigned Size = ArgDI->Flags.getByValSize();
1387  unsigned Align = std::max(StackSlotSize, ArgDI->Flags.getByValAlign());
1388  // Create a new object on the stack and copy the pointee into it.
1389  int FI = MFI.CreateStackObject(Size, Align, false);
1390  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1391  InVals.push_back(FIN);
1392  MemOps.push_back(DAG.getMemcpy(Chain, dl, FIN, ArgDI->SDV,
1393  DAG.getConstant(Size, dl, MVT::i32),
1394  Align, false, false, false,
1395                                      MachinePointerInfo(),
1396                                      MachinePointerInfo()));
1397  } else {
1398  InVals.push_back(ArgDI->SDV);
1399  }
1400  }
1401 
1402   // 4. Chain mem ops nodes into a TokenFactor.
1403  if (!MemOps.empty()) {
1404  MemOps.push_back(Chain);
1405  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
1406  }
1407 
1408  return Chain;
1409 }
1410 
1411 //===----------------------------------------------------------------------===//
1412 // Return Value Calling Convention Implementation
1413 //===----------------------------------------------------------------------===//
1414 
1415 bool XCoreTargetLowering::
1416 CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
1417  bool isVarArg,
1418  const SmallVectorImpl<ISD::OutputArg> &Outs,
1419  LLVMContext &Context) const {
1420   SmallVector<CCValAssign, 16> RVLocs;
1421   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
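  // Returning values in memory relies on the return-stack slot that is only
  // set up for non-vararg functions (see LowerReturn), so reject any vararg
  // signature that would need stack space for its results.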
1422  if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
1423  return false;
1424  if (CCInfo.getNextStackOffset() != 0 && isVarArg)
1425  return false;
1426  return true;
1427 }
1428 
1429 SDValue
1430 XCoreTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1431  bool isVarArg,
1432  const SmallVectorImpl<ISD::OutputArg> &Outs,
1433  const SmallVectorImpl<SDValue> &OutVals,
1434  const SDLoc &dl, SelectionDAG &DAG) const {
1435 
1436  XCoreFunctionInfo *XFI =
1437       DAG.getMachineFunction().getInfo<XCoreFunctionInfo>();
1438   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
1439 
1440  // CCValAssign - represent the assignment of
1441  // the return value to a location
1442   SmallVector<CCValAssign, 16> RVLocs;
1443 
1444  // CCState - Info about the registers and stack slot.
1445  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1446  *DAG.getContext());
1447 
1448  // Analyze return values.
1449  if (!isVarArg)
1450  CCInfo.AllocateStack(XFI->getReturnStackOffset(), 4);
1451 
1452  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);
1453 
1454  SDValue Flag;
1455  SmallVector<SDValue, 4> RetOps(1, Chain);
1456 
1457  // Return on XCore is always a "retsp 0"
1458  RetOps.push_back(DAG.getConstant(0, dl, MVT::i32));
1459 
1460  SmallVector<SDValue, 4> MemOpChains;
1461  // Handle return values that must be copied to memory.
1462  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1463  CCValAssign &VA = RVLocs[i];
1464  if (VA.isRegLoc())
1465  continue;
1466  assert(VA.isMemLoc());
1467  if (isVarArg) {
1468  report_fatal_error("Can't return value from vararg function in memory");
1469  }
1470 
1471  int Offset = VA.getLocMemOffset();
1472  unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
1473  // Create the frame index object for the memory location.
1474  int FI = MFI.CreateFixedObject(ObjSize, Offset, false);
1475 
1476  // Create a SelectionDAG node corresponding to a store
1477  // to this memory location.
1478  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1479  MemOpChains.push_back(DAG.getStore(
1480         Chain, dl, OutVals[i], FIN,
1481         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
1482  }
1483 
1484  // Transform all store nodes into one single node because
1485  // all stores are independent of each other.
1486  if (!MemOpChains.empty())
1487  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1488 
1489  // Now handle return values copied to registers.
1490  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1491  CCValAssign &VA = RVLocs[i];
1492  if (!VA.isRegLoc())
1493  continue;
1494  // Copy the result values into the output registers.
1495  Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
1496 
1497  // guarantee that all emitted copies are
1498  // stuck together, avoiding something bad
1499  Flag = Chain.getValue(1);
1500  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1501  }
1502 
1503  RetOps[0] = Chain; // Update chain.
1504 
1505  // Add the flag if we have it.
1506  if (Flag.getNode())
1507  RetOps.push_back(Flag);
1508 
1509  return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other, RetOps);
1510 }
1511 
1512 //===----------------------------------------------------------------------===//
1513 // Other Lowering Code
1514 //===----------------------------------------------------------------------===//
1515 
1516 MachineBasicBlock *
1517 XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
1518                                                  MachineBasicBlock *BB) const {
1519  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1520  DebugLoc dl = MI.getDebugLoc();
1521  assert((MI.getOpcode() == XCore::SELECT_CC) &&
1522  "Unexpected instr type to insert");
1523 
1524  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
1525  // control-flow pattern. The incoming instruction knows the destination vreg
1526  // to set, the condition code register to branch on, the true/false values to
1527  // select between, and a branch opcode to use.
1528  const BasicBlock *LLVM_BB = BB->getBasicBlock();
1529   MachineFunction::iterator It = ++BB->getIterator();
1530 
1531  // thisMBB:
1532  // ...
1533  // TrueVal = ...
1534  // cmpTY ccX, r1, r2
1535  // bCC copy1MBB
1536  // fallthrough --> copy0MBB
1537  MachineBasicBlock *thisMBB = BB;
1538  MachineFunction *F = BB->getParent();
1539  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
1540  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
1541  F->insert(It, copy0MBB);
1542  F->insert(It, sinkMBB);
1543 
1544  // Transfer the remainder of BB and its successor edges to sinkMBB.
1545  sinkMBB->splice(sinkMBB->begin(), BB,
1546  std::next(MachineBasicBlock::iterator(MI)), BB->end());
1547  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
1548 
1549  // Next, add the true and fallthrough blocks as its successors.
1550  BB->addSuccessor(copy0MBB);
1551  BB->addSuccessor(sinkMBB);
1552 
1553  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
1554  .addReg(MI.getOperand(1).getReg())
1555  .addMBB(sinkMBB);
1556 
1557  // copy0MBB:
1558  // %FalseValue = ...
1559  // # fallthrough to sinkMBB
1560  BB = copy0MBB;
1561 
1562  // Update machine-CFG edges
1563  BB->addSuccessor(sinkMBB);
1564 
1565  // sinkMBB:
1566  // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
1567  // ...
1568  BB = sinkMBB;
1569  BuildMI(*BB, BB->begin(), dl, TII.get(XCore::PHI), MI.getOperand(0).getReg())
1570  .addReg(MI.getOperand(3).getReg())
1571  .addMBB(copy0MBB)
1572  .addReg(MI.getOperand(2).getReg())
1573  .addMBB(thisMBB);
1574 
1575  MI.eraseFromParent(); // The pseudo instruction is gone now.
1576  return BB;
1577 }
1578 
1579 //===----------------------------------------------------------------------===//
1580 // Target Optimization Hooks
1581 //===----------------------------------------------------------------------===//
1582 
1583 SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
1584  DAGCombinerInfo &DCI) const {
1585  SelectionDAG &DAG = DCI.DAG;
1586  SDLoc dl(N);
1587  switch (N->getOpcode()) {
1588  default: break;
1589  case ISD::INTRINSIC_VOID:
1590  switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
1591  case Intrinsic::xcore_outt:
1592     case Intrinsic::xcore_outct:
1593     case Intrinsic::xcore_chkct: {
1594  SDValue OutVal = N->getOperand(3);
1595  // These instructions ignore the high bits.
1596  if (OutVal.hasOneUse()) {
1597  unsigned BitWidth = OutVal.getValueSizeInBits();
1598  APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
1599  KnownBits Known;
1600         TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
1601                               !DCI.isBeforeLegalizeOps());
1602  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1603  if (TLI.ShrinkDemandedConstant(OutVal, DemandedMask, TLO) ||
1604  TLI.SimplifyDemandedBits(OutVal, DemandedMask, Known, TLO))
1605  DCI.CommitTargetLoweringOpt(TLO);
1606  }
1607  break;
1608  }
1609  case Intrinsic::xcore_setpt: {
1610  SDValue Time = N->getOperand(3);
1611  // This instruction ignores the high bits.
1612  if (Time.hasOneUse()) {
1613  unsigned BitWidth = Time.getValueSizeInBits();
1614  APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
1615  KnownBits Known;
1616         TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
1617                               !DCI.isBeforeLegalizeOps());
1618  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1619  if (TLI.ShrinkDemandedConstant(Time, DemandedMask, TLO) ||
1620  TLI.SimplifyDemandedBits(Time, DemandedMask, Known, TLO))
1621  DCI.CommitTargetLoweringOpt(TLO);
1622  }
1623  break;
1624  }
1625  }
1626  break;
1627  case XCoreISD::LADD: {
1628  SDValue N0 = N->getOperand(0);
1629  SDValue N1 = N->getOperand(1);
1630  SDValue N2 = N->getOperand(2);
1631  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1632  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1633  EVT VT = N0.getValueType();
1634 
1635  // canonicalize constant to RHS
1636  if (N0C && !N1C)
1637  return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);
1638 
1639  // fold (ladd 0, 0, x) -> 0, x & 1
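  // Reasoning: with both addends zero, only the low bit of the carry-in
  // contributes to the sum and no carry-out can be produced, so
  //   (ladd 0, 0, x)  ==>  merge_values((and x, 1), 0)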
1640  if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
1641  SDValue Carry = DAG.getConstant(0, dl, VT);
1642  SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
1643  DAG.getConstant(1, dl, VT));
1644  SDValue Ops[] = { Result, Carry };
1645  return DAG.getMergeValues(Ops, dl);
1646  }
1647 
1648  // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
1649  // low bit set
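  // Reasoning: when y is known to be 0 or 1 it acts purely as a carry-in, so
  // the low result of (ladd x, 0, y) equals a plain (add x, y); the carry-out
  // (result value 1) has no uses here and is replaced by the constant 0.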
1650  if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
1651  APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1652  VT.getSizeInBits() - 1);
1653  KnownBits Known = DAG.computeKnownBits(N2);
1654  if ((Known.Zero & Mask) == Mask) {
1655  SDValue Carry = DAG.getConstant(0, dl, VT);
1656  SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
1657  SDValue Ops[] = { Result, Carry };
1658  return DAG.getMergeValues(Ops, dl);
1659  }
1660  }
1661  }
1662  break;
1663  case XCoreISD::LSUB: {
1664  SDValue N0 = N->getOperand(0);
1665  SDValue N1 = N->getOperand(1);
1666  SDValue N2 = N->getOperand(2);
1667  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1668  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1669  EVT VT = N0.getValueType();
1670 
1671  // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
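  // Reasoning: the computation is 0 - 0 - x with x known to be 0 or 1, so the
  // low result is the negation (sub 0, x) and a borrow is produced exactly
  // when x is 1, making the borrow result simply x itself.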
1672  if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
1673  APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1674  VT.getSizeInBits() - 1);
1675  KnownBits Known = DAG.computeKnownBits(N2);
1676  if ((Known.Zero & Mask) == Mask) {
1677  SDValue Borrow = N2;
1678  SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
1679  DAG.getConstant(0, dl, VT), N2);
1680  SDValue Ops[] = { Result, Borrow };
1681  return DAG.getMergeValues(Ops, dl);
1682  }
1683  }
1684 
1685  // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
1686  // low bit set
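  // Reasoning: as in the LADD case above, a borrow-in known to be 0 or 1 makes
  // the low result equal to a plain (sub x, y); the unused borrow-out is
  // replaced by the constant 0.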
1687  if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
1688  APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1689  VT.getSizeInBits() - 1);
1690  KnownBits Known = DAG.computeKnownBits(N2);
1691  if ((Known.Zero & Mask) == Mask) {
1692  SDValue Borrow = DAG.getConstant(0, dl, VT);
1693  SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
1694  SDValue Ops[] = { Result, Borrow };
1695  return DAG.getMergeValues(Ops, dl);
1696  }
1697  }
1698  }
1699  break;
1700  case XCoreISD::LMUL: {
1701  SDValue N0 = N->getOperand(0);
1702  SDValue N1 = N->getOperand(1);
1703  SDValue N2 = N->getOperand(2);
1704  SDValue N3 = N->getOperand(3);
1705  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1706  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1707  EVT VT = N0.getValueType();
1708  // Canonicalize multiplicative constant to RHS. If both multiplicative
1709  // operands are constant canonicalize smallest to RHS.
1710  if ((N0C && !N1C) ||
1711  (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
1712  return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
1713  N1, N0, N2, N3);
1714 
1715  // lmul(x, 0, a, b)
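  // With a zero multiplicand the long multiply degenerates to a + b: the low
  // word is (add a, b) and the high word is at most the carry of that add,
  // which is exactly what (ladd a, b, 0) produces as its second result.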
1716  if (N1C && N1C->isNullValue()) {
1717  // If the high result is unused fold to add(a, b)
1718  if (N->hasNUsesOfValue(0, 0)) {
1719  SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
1720  SDValue Ops[] = { Lo, Lo };
1721  return DAG.getMergeValues(Ops, dl);
1722  }
1723  // Otherwise fold to ladd(a, b, 0)
1724  SDValue Result =
1725  DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
1726  SDValue Carry(Result.getNode(), 1);
1727  SDValue Ops[] = { Carry, Result };
1728  return DAG.getMergeValues(Ops, dl);
1729  }
1730  }
1731  break;
1732  case ISD::ADD: {
1733  // Fold 32 bit expressions such as add(add(mul(x,y),a),b) ->
1734  // lmul(x, y, a, b). The high result of lmul will be ignored.
1735  // This is only profitable if the intermediate results are unused
1736  // elsewhere.
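  // For example (illustrative): with t = (mul x, y), u = (add t, a) and this
  // node being (add u, b), where t and u have no other uses, the whole chain
  // becomes the low result (value 1) of (lmul x, y, a, b).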
1737  SDValue Mul0, Mul1, Addend0, Addend1;
1738  if (N->getValueType(0) == MVT::i32 &&
1739  isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
1740  SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
1741  DAG.getVTList(MVT::i32, MVT::i32), Mul0,
1742  Mul1, Addend0, Addend1);
1743  SDValue Result(Ignored.getNode(), 1);
1744  return Result;
1745  }
1746  APInt HighMask = APInt::getHighBitsSet(64, 32);
1747  // Fold 64 bit expression such as add(add(mul(x,y),a),b) ->
1748  // lmul(x, y, a, b) if all operands are zero-extended. We do this
1749  // before type legalization as it is messy to match the operands after
1750  // that.
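  // For example (illustrative): if x, y, a and b are i64 values whose upper 32
  // bits are known zero, add(add(mul(x,y),a),b) can be computed by a single
  // 32x32->64 lmul on the low halves and reassembled with BUILD_PAIR below.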
1751  if (N->getValueType(0) == MVT::i64 &&
1752  isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
1753  DAG.MaskedValueIsZero(Mul0, HighMask) &&
1754  DAG.MaskedValueIsZero(Mul1, HighMask) &&
1755  DAG.MaskedValueIsZero(Addend0, HighMask) &&
1756  DAG.MaskedValueIsZero(Addend1, HighMask)) {
1757  SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1758  Mul0, DAG.getConstant(0, dl, MVT::i32));
1759  SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1760  Mul1, DAG.getConstant(0, dl, MVT::i32));
1761  SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1762  Addend0, DAG.getConstant(0, dl, MVT::i32));
1763  SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1764  Addend1, DAG.getConstant(0, dl, MVT::i32));
1765  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
1766  DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
1767  Addend0L, Addend1L);
1768  SDValue Lo(Hi.getNode(), 1);
1769  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
1770  }
1771  }
1772  break;
1773  case ISD::STORE: {
1774  // Replace unaligned store of unaligned load with memmove.
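  // A memmove rather than a memcpy is used here, presumably because nothing at
  // this point proves that the source and destination ranges do not overlap.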
1775  StoreSDNode *ST = cast<StoreSDNode>(N);
1776  if (!DCI.isBeforeLegalize() ||
1777  allowsMisalignedMemoryAccesses(ST->getMemoryVT(),
1778  ST->getAddressSpace(),
1779  ST->getAlignment()) ||
1780  ST->isVolatile() || ST->isIndexed()) {
1781  break;
1782  }
1783  SDValue Chain = ST->getChain();
1784 
1785  unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
1786  assert((StoreBits % 8) == 0 &&
1787  "Store size in bits must be a multiple of 8");
1788  unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(
1789  ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
1790  unsigned Alignment = ST->getAlignment();
1791  if (Alignment >= ABIAlignment) {
1792  break;
1793  }
1794 
1795  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
1796  if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
1797  LD->getAlignment() == Alignment &&
1798  !LD->isVolatile() && !LD->isIndexed() &&
1799  Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
1800  bool isTail = isInTailCallPosition(DAG, ST, Chain);
1801  return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
1802  LD->getBasePtr(),
1803  DAG.getConstant(StoreBits/8, dl, MVT::i32),
1804  Alignment, false, isTail, ST->getPointerInfo(),
1805  LD->getPointerInfo());
1806  }
1807  }
1808  break;
1809  }
1810  }
1811  return SDValue();
1812 }
1813 
1814 void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
1815  KnownBits &Known,
1816  const APInt &DemandedElts,
1817  const SelectionDAG &DAG,
1818  unsigned Depth) const {
1819  Known.resetAll();
1820  switch (Op.getOpcode()) {
1821  default: break;
1822  case XCoreISD::LADD:
1823  case XCoreISD::LSUB:
1824  if (Op.getResNo() == 1) {
1825  // Top bits of carry / borrow are clear.
1826  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1827  Known.getBitWidth() - 1);
1828  }
1829  break;
1830  case ISD::INTRINSIC_W_CHAIN:
1831  {
1832  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1833  switch (IntNo) {
1834  case Intrinsic::xcore_getts:
1835  // High bits are known to be zero.
1836  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1837  Known.getBitWidth() - 16);
1838  break;
1839  case Intrinsic::xcore_int:
1840  case Intrinsic::xcore_inct:
1841  // High bits are known to be zero.
1842  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1843  Known.getBitWidth() - 8);
1844  break;
1845  case Intrinsic::xcore_testct:
1846  // Result is either 0 or 1.
1847  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1848  Known.getBitWidth() - 1);
1849  break;
1850  case Intrinsic::xcore_testwct:
1851  // Result is in the range 0 - 4.
1852  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1853  Known.getBitWidth() - 3);
1854  break;
1855  }
1856  }
1857  break;
1858  }
1859 }
1860 
1861 //===----------------------------------------------------------------------===//
1862 // Addressing mode description hooks
1863 //===----------------------------------------------------------------------===//
1864 
1865 static inline bool isImmUs(int64_t val)
1866 {
1867  return (val >= 0 && val <= 11);
1868 }
1869 
1870 static inline bool isImmUs2(int64_t val)
1871 {
1872  return (val%2 == 0 && isImmUs(val/2));
1873 }
1874 
1875 static inline bool isImmUs4(int64_t val)
1876 {
1877  return (val%4 == 0 && isImmUs(val/4));
1878 }
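// Taken together, these predicates describe the scaled unsigned-small
// immediate forms accepted below: a byte offset in 0-11, an even halfword
// offset in 0-22, and a word offset that is a multiple of 4 in 0-44.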
1879 
1880 /// isLegalAddressingMode - Return true if the addressing mode represented
1881 /// by AM is legal for this target, for a load/store of the specified type.
1882 bool XCoreTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1883  const AddrMode &AM, Type *Ty,
1884  unsigned AS,
1885  Instruction *I) const {
1886  if (Ty->getTypeID() == Type::VoidTyID)
1887  return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);
1888 
1889  unsigned Size = DL.getTypeAllocSize(Ty);
1890  if (AM.BaseGV) {
1891  return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
1892  AM.BaseOffs%4 == 0;
1893  }
1894 
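  // For example (illustrative), a 4-byte access falls into the default case
  // below and is legal either as reg + imm with the offset a multiple of 4 in
  // [0, 44] (Scale == 0), or as reg + reg<<2 (Scale == 4) with a zero offset.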
1895  switch (Size) {
1896  case 1:
1897  // reg + imm
1898  if (AM.Scale == 0) {
1899  return isImmUs(AM.BaseOffs);
1900  }
1901  // reg + reg
1902  return AM.Scale == 1 && AM.BaseOffs == 0;
1903  case 2:
1904  case 3:
1905  // reg + imm
1906  if (AM.Scale == 0) {
1907  return isImmUs2(AM.BaseOffs);
1908  }
1909  // reg + reg<<1
1910  return AM.Scale == 2 && AM.BaseOffs == 0;
1911  default:
1912  // reg + imm
1913  if (AM.Scale == 0) {
1914  return isImmUs4(AM.BaseOffs);
1915  }
1916  // reg + reg<<2
1917  return AM.Scale == 4 && AM.BaseOffs == 0;
1918  }
1919 }
1920 
1921 //===----------------------------------------------------------------------===//
1922 // XCore Inline Assembly Support
1923 //===----------------------------------------------------------------------===//
1924 
1925 std::pair<unsigned, const TargetRegisterClass *>
1926 XCoreTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
1927  StringRef Constraint,
1928  MVT VT) const {
1929  if (Constraint.size() == 1) {
1930  switch (Constraint[0]) {
1931  default : break;
1932  case 'r':
1933  return std::make_pair(0U, &XCore::GRRegsRegClass);
1934  }
1935  }
1936  // Use the default implementation in TargetLowering to convert the register
1937  // constraint into a member of a register class.
1938  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
1939 }
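// For example (illustrative): in inline assembly such as
//   asm("add %0, %1, %2" : "=r"(res) : "r"(a), "r"(b));
// each 'r' constraint above resolves to the general-purpose GRRegs register
// class.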