class ARMFastISel final : public FastISel {
  explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
                       const TargetLibraryInfo *libInfo)
      : FastISel(funcInfo, libInfo),
        Subtarget(&funcInfo.MF->getSubtarget<ARMSubtarget>()),
        M(const_cast<Module &>(*funcInfo.MF->getFunction().getParent())),
        TM(funcInfo.MF->getTarget()), TII(*Subtarget->getInstrInfo()),
        TLI(*Subtarget->getTargetLowering()) {
    Context = &funcInfo.Fn->getContext();
  }
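  // ARMFastISel implements LLVM's FastISel interface for ARM: it selects
  // machine instructions straight from IR, one instruction at a time,
  // trading code quality for compile time (the -O0 path). Members such as
  // Subtarget, M, TM, TII, and TLI cache the subtarget hooks used by every
  // emit routine below; their declarations are elided in this excerpt, and
  // the constructor head above is reconstructed from the FastISel interface.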
  unsigned fastEmitInst_r(unsigned MachineInstOpcode,
                          const TargetRegisterClass *RC,
                          unsigned Op0, bool Op0IsKill);
  unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
                           const TargetRegisterClass *RC,
                           unsigned Op0, bool Op0IsKill,
                           unsigned Op1, bool Op1IsKill);
  unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
                           const TargetRegisterClass *RC,
                           unsigned Op0, bool Op0IsKill, uint64_t Imm);
  unsigned fastEmitInst_i(unsigned MachineInstOpcode,
                          const TargetRegisterClass *RC, uint64_t Imm);
  bool fastSelectInstruction(const Instruction *I) override;
  unsigned fastMaterializeConstant(const Constant *C) override;
  unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
  bool fastLowerArguments() override;
#include "ARMGenFastISel.inc"

  bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
  bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
  bool SelectCall(const Instruction *I, const char *IntrMemName);
  bool isPositionIndependent() const;
  bool isTypeLegal(Type *Ty, MVT &VT);
  bool isLoadTypeLegal(Type *Ty, MVT &VT);
  bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                  bool isZExt, bool isEquality);
  bool ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                   unsigned Alignment = 0, bool isZExt = true,
                   bool allocReg = true);
  bool ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                    unsigned Alignment = 0);
  bool ARMComputeAddress(const Value *Obj, Address &Addr);
  void ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3);
  bool ARMIsMemCpySmall(uint64_t Len);
  bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
                             unsigned Alignment);
  unsigned ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
  unsigned ARMMoveToFPReg(MVT VT, unsigned SrcReg);
  unsigned ARMMoveToIntReg(MVT VT, unsigned SrcReg);
  unsigned ARMSelectCallOp(bool UseReg);
  unsigned getLibcallReg(const Twine &Name);
  bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                  const Instruction *I, CallingConv::ID CC,
                  unsigned &NumBytes, bool isVarArg);
  void AddLoadStoreOperands(MVT VT, Address &Addr,
                            const MachineInstrBuilder &MIB,
                            MachineMemOperand::Flags Flags, bool useAM3);
};
#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This differs from DefinesPredicate in that we
// don't care about implicit defs here.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  if (!MI->hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (const MachineOperand &MO : MI->operands()) {
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}
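// Most ARM instructions carry an optional predicate (condition code plus
// predicate register) and, for flag-setting forms, an optional CPSR def.
// The helpers here decide which optional operands a freshly built
// MachineInstr still needs, so every emit path below can simply wrap its
// BuildMI(...) call in AddOptionalDefs(...).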
bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If we're a thumb2 or not NEON function we'll be handled via isPredicable.
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
       AFI->isThumb2Function())
    return MI->isPredicable();

  for (const MCOperandInfo &opInfo : MCID.operands())
    if (opInfo.isPredicate())
      return true;

  return false;
}

const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // NEON instructions in ARM mode take a predicate operand even though they
  // are not predicable; add the always-execute predicate.
  if (isARMNEONPred(MI))
    MIB.add(predOps(ARMCC::AL));

  // If the instruction optionally sets a predicate, add the condition-code
  // operand; CPSR-defining instructions take the Thumb1 form.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR))
    MIB.add(CPSR ? t1CondCodeOp() : condCodeOp());
  return MIB;
}
unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operand is sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
                            ResultReg).addReg(Op0, Op0IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                        .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                        .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}
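// The fastEmitInst_* helpers all follow the same pattern: allocate a result
// register in the requested class, then either emit the instruction with an
// explicit def, or - when the opcode has no explicit defs - emit it as-is
// and COPY the value out of its first implicit-def register.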
unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operands are sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  Op1 = constrainOperandRegClass(II, Op1, 2);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
            .addReg(Op0, Op0IsKill * RegState::Kill)
            .addReg(Op1, Op1IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                        .addReg(Op0, Op0IsKill * RegState::Kill)
                        .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                        .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}
unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Op0 = constrainOperandRegClass(II, Op0, 1);
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
            .addReg(Op0, Op0IsKill * RegState::Kill)
            .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                        .addReg(Op0, Op0IsKill * RegState::Kill)
                        .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                        .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}
unsigned ARMFastISel::fastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
                            ResultReg).addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                        .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                        .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}
unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VMOVSR), MoveReg)
                      .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VMOVRS), MoveReg)
                      .addReg(SrcReg));
  return MoveReg;
}
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // If the constant can be encoded as a VFP3 fp immediate, emit a VMOV.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm = is64bit ? ARM_AM::getFP64Imm(Val) : ARM_AM::getFP32Imm(Val);
    unsigned Opc = is64bit ? ARM::FCONSTD : ARM::FCONSTS;
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), DestReg).addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return false;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
  if (Align == 0)
    Align = DL.getTypeAllocSize(CFP->getType());
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // Load the constant from the constant pool (the extra reg is for addrmode5).
  AddOptionalDefs(
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx)
          .addReg(0));
  return DestReg;
}
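// Constant materialization in FastISel mirrors what SelectionDAG would do,
// just with hand-written special cases: try a cheap single-instruction
// immediate form first, then fall back to a constant-pool load.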
unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
  const ConstantInt *CI = cast<ConstantInt>(C);

  // Small positive constants can be built with a single MOVW.
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
    unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
    const TargetRegisterClass *RC =
        isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
    unsigned ImmReg = createResultReg(RC);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), ImmReg)
                        .addImm(CI->getZExtValue()));
    return ImmReg;
  }

  // Use MVN to emit negative constants whose bitwise complement encodes.
  if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
    unsigned Imm = (unsigned)~(CI->getSExtValue());
    bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1)
                           : (ARM_AM::getSOImmVal(Imm) != -1);
    if (UseImm) {
      unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
      const TargetRegisterClass *RC =
          isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
      unsigned ImmReg = createResultReg(RC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(Opc), ImmReg)
                          .addImm(Imm));
      return ImmReg;
    }
  }

  // Try a MOVW/MOVT pair via the generic fast emitter.
  unsigned ResultReg = 0;
  if (Subtarget->useMovt(*FuncInfo.MF))
    ResultReg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  if (ResultReg)
    return ResultReg;

  // Otherwise load the constant from the constant pool.
  // MachineConstantPool wants an explicit alignment.
  unsigned Align = DL.getPrefTypeAlignment(C->getType());
  if (Align == 0)
    Align = DL.getTypeAllocSize(C->getType());
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);
  ResultReg = createResultReg(TLI.getRegClassFor(VT));
  if (isThumb2)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::t2LDRpci), ResultReg)
                        .addConstantPoolIndex(Idx));
  else
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::LDRcp), ResultReg)
                        .addConstantPoolIndex(Idx)
                        .addImm(0));
  return ResultReg;
}
bool ARMFastISel::isPositionIndependent() const {
  return TLI.isPositionIndependent();
}
unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
  // ROPI/RWPI not currently supported.
  if (Subtarget->isROPI() || Subtarget->isRWPI())
    return 0;

  bool IsIndirect = Subtarget->isGVIndirectSymbol(GV);
  const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
                                           : &ARM::GPRRegClass;
  unsigned DestReg = createResultReg(RC);

  // FastISel TLS support on non-MachO is broken, punt to SelectionDAG.
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  bool IsThreadLocal = GVar && GVar->isThreadLocal();
  if (!Subtarget->isTargetMachO() && IsThreadLocal) return 0;

  bool IsPositionIndependent = isPositionIndependent();
  // Use movw+movt when possible; it avoids constant-pool entries.
  if (Subtarget->useMovt(*FuncInfo.MF) &&
      (Subtarget->isTargetMachO() || !IsPositionIndependent)) {
    unsigned Opc;
    unsigned char TF = 0;
    if (Subtarget->isTargetMachO())
      TF = ARMII::MO_NONLAZY;

    if (IsPositionIndependent)
      Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
    else
      Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), DestReg).addGlobalAddress(GV, 0, TF));
  } else {
    // MachineConstantPool wants an explicit alignment.
    unsigned Align = DL.getPrefTypeAlignment(GV->getType());
    if (Align == 0)
      Align = DL.getTypeAllocSize(GV->getType());

    if (Subtarget->isTargetELF() && IsPositionIndependent)
      return ARMLowerPICELF(GV, Align, VT);

    // Grab the constant-pool index.
    unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
    unsigned Id = AFI->createPICLabelUId();
    ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
                                                                ARMCP::CPValue,
                                                                PCAdj);
    unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

    // Load the value.
    if (isThumb2) {
      unsigned Opc = IsPositionIndependent ? ARM::t2LDRpci_pic : ARM::t2LDRpci;
      MachineInstrBuilder MIB =
          BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
                  DestReg).addConstantPoolIndex(Idx);
      if (IsPositionIndependent)
        MIB.addImm(Id);
      AddOptionalDefs(MIB);
    } else {
      // The extra immediate is for addrmode2.
      MachineInstrBuilder MIB =
          BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                  TII.get(ARM::LDRcp), DestReg)
              .addConstantPoolIndex(Idx)
              .addImm(0);
      AddOptionalDefs(MIB);

      if (IsPositionIndependent) {
        unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
        unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
        MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                      DbgLoc, TII.get(Opc), NewDestReg)
                  .addReg(DestReg)
                  .addImm(Id);
        AddOptionalDefs(MIB);
        return NewDestReg;
      }
    }
  }

  // Indirect symbols need one extra dereferencing load.
  if (IsIndirect) {
    MachineInstrBuilder MIB;
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb2)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                    TII.get(ARM::t2LDRi12), NewDestReg)
                .addReg(DestReg)
                .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                    TII.get(ARM::LDRi12), NewDestReg)
                .addReg(DestReg)
                .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}
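// Three strategies, in order of preference: movw/movt with the address (or
// its pc-relative variant), a PIC ELF sequence via ARMLowerPICELF, or a
// constant-pool load with an optional PICADD/PICLDR fixup. Indirect symbols
// (e.g. MachO non-lazy pointers) get one extra dereferencing load at the end.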
unsigned ARMFastISel::fastMaterializeConstant(const Constant *C) {
  EVT CEVT = TLI.getValueType(DL, C->getType(), true);

  // Only handle simple types.
  if (!CEVT.isSimple()) return 0;
  MVT VT = CEVT.getSimpleVT();

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}
unsigned ARMFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    ResultReg = constrainOperandRegClass(TII.get(Opc), ResultReg, 0);

    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), ResultReg)
                        .addFrameIndex(SI->second)
                        .addImm(0));
    return ResultReg;
  }

  return 0;
}
bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(DL, Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign- or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate to figure out what we're looking at.
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special address spaces.
      return false;

  switch (Opcode) {
    default:
      break;
    case Instruction::BitCast:
      // Look through bitcasts.
      return ARMComputeAddress(U->getOperand(0), Addr);
    case Instruction::IntToPtr:
      // Look past no-op inttoptrs.
      if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
          TLI.getPointerTy(DL))
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    case Instruction::PtrToInt:
      // Look past no-op ptrtoints.
      if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    case Instruction::GetElementPtr: {
      Address SavedAddr = Addr;
      int TmpOffset = Addr.Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
      gep_type_iterator GTI = gep_type_begin(U);
      for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
           i != e; ++i, ++GTI) {
        const Value *Op = *i;
        if (StructType *STy = GTI.getStructTypeOrNull()) {
          const StructLayout *SL = DL.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
        } else {
          uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
          while (true) {
            if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
              // Constant-offset addressing.
              TmpOffset += CI->getSExtValue() * S;
              break;
            }
            if (canFoldAddIntoGEP(U, Op)) {
              // A compatible add with a constant operand. Fold the constant.
              ConstantInt *CI =
                  cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
              TmpOffset += CI->getSExtValue() * S;
              // Iterate on the other operand.
              Op = cast<AddOperator>(Op)->getOperand(0);
              continue;
            }
            // Unsupported.
            goto unsupported_gep;
          }
        }
      }

      // Try to grab the base operand now.
      Addr.Offset = TmpOffset;
      if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

      // We failed, restore everything and try the other options.
      Addr = SavedAddr;

      unsupported_gep:
      break;
    }
    case Instruction::Alloca: {
      const AllocaInst *AI = cast<AllocaInst>(Obj);
      DenseMap<const AllocaInst*, int>::iterator SI =
          FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        Addr.BaseType = Address::FrameIndexBase;
        Addr.Base.FI = SI->second;
        return true;
      }
      break;
    }
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}
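// Address is a small struct (register or frame-index base plus an integer
// offset) filled in by walking bitcasts, no-op int<->ptr casts, GEPs, and
// static allocas. Anything it cannot fold is simply materialized with
// getRegForValue as a plain register base.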
void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
  bool needsLowering = false;
  switch (VT.SimpleTy) {
    default: llvm_unreachable("Unhandled load/store type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (!useAM3) {
        // Integer loads/stores handle 12-bit offsets.
        needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
        // Handle negative offsets.
        if (needsLowering && isThumb2)
          needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
                            Addr.Offset > -256);
      } else {
        // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
        needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
      }
      break;
    case MVT::f32:
    case MVT::f64:
      // Floating point operands handle 8-bit offsets.
      needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
      break;
  }

  // If this is a frame index and the offset needs to be simplified, put the
  // alloca address into a register and switch the base type back to register.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
                                             : &ARM::GPRRegClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), ResultReg)
                        .addFrameIndex(Addr.Base.FI)
                        .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction, compute
  // reg+offset into a register.
  if (needsLowering) {
    Addr.Base.Reg = fastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}
void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       MachineMemOperand::Flags Flags,
                                       bool useAM3) {
  // VFP loads/stores (addrmode5) encode the offset divided by 4.
  if (VT.SimpleTy == MVT::f32 || VT.SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*FuncInfo.MF, FI, Addr.Offset),
        Flags, MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores and signed byte loads need an extra operand.
    if (useAM3) {
      int Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores and signed byte loads need an extra operand.
    if (useAM3) {
      int Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
  }
  AddOptionalDefs(MIB);
}
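// useAM3 selects ARM addressing mode 3 (halfword and signed-byte loads and
// stores), which encodes the offset as a separate sign bit plus an 8-bit
// immediate - hence the (0x100 | -Addr.Offset) encoding above - while the
// other forms take a plain signed immediate.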
bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                              unsigned Alignment, bool isZExt, bool allocReg) {
  unsigned Opc;
  bool useAM3 = false;
  bool needVMOV = false;
  const TargetRegisterClass *RC;
  switch (VT.SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1:
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
        else
          Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
      } else {
        if (isZExt) {
          Opc = ARM::LDRBi12;
        } else {
          Opc = ARM::LDRSB;
          useAM3 = true;
        }
      }
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      break;
    case MVT::i16:
      if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
        else
          Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
      } else {
        Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
        useAM3 = true;
      }
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      break;
    case MVT::i32:
      if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = ARM::t2LDRi8;
        else
          Opc = ARM::t2LDRi12;
      } else {
        Opc = ARM::LDRi12;
      }
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned loads need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        needVMOV = true;
        VT = MVT::i32;
        Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
        RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      } else {
        Opc = ARM::VLDRS;
        RC = TLI.getRegClassFor(VT);
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned loads need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      Opc = ARM::VLDRD;
      RC = TLI.getRegClassFor(VT);
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  if (allocReg)
    ResultReg = createResultReg(RC);
  assert(ResultReg > 255 && "Expected an allocated virtual register.");
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);

  // If we had an unaligned load of a float we've converted it to a regular
  // load. Now we must move from the GPR to the FP register.
  if (needVMOV) {
    unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::VMOVSR), MoveReg)
                        .addReg(ResultReg));
    ResultReg = MoveReg;
  }
  return true;
}
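// Two details worth noting above: the assert relies on virtual register
// numbers living above the physical-register range, so "ResultReg > 255" is
// a cheap is-this-a-vreg sanity check; and an unaligned f32 load is rewritten
// as an i32 load followed by a VMOVSR into the FP register file (needVMOV).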
bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  const Value *SV = I->getOperand(0);
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(SV)) {
      if (Arg->hasSwiftErrorAttr())
        return false;
    }

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
      if (Alloca->isSwiftError())
        return false;
    }
  }

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
    return false;
  updateValueMap(I, ResultReg);
  return true;
}
bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                               unsigned Alignment) {
  unsigned StrOpc;
  bool useAM3 = false;
  switch (VT.SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1: {
      // Mask the i1 down to a single bit before storing it as a byte.
      unsigned Res = createResultReg(isThumb2 ? &ARM::tGPRRegClass
                                              : &ARM::GPRRegClass);
      unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      SrcReg = constrainOperandRegClass(TII.get(Opc), SrcReg, 1);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(Opc), Res)
                          .addReg(SrcReg).addImm(1));
      SrcReg = Res;
      LLVM_FALLTHROUGH;
    }
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRBi8;
        else
          StrOpc = ARM::t2STRBi12;
      } else {
        StrOpc = ARM::STRBi12;
      }
      break;
    case MVT::i16:
      if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRHi8;
        else
          StrOpc = ARM::t2STRHi12;
      } else {
        StrOpc = ARM::STRH;
        useAM3 = true;
      }
      break;
    case MVT::i32:
      if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRi8;
        else
          StrOpc = ARM::t2STRi12;
      } else {
        StrOpc = ARM::STRi12;
      }
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned stores need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                TII.get(ARM::VMOVRS), MoveReg)
                            .addReg(SrcReg));
        SrcReg = MoveReg;
        VT = MVT::i32;
        StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
      } else {
        StrOpc = ARM::VSTRS;
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned stores need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      StrOpc = ARM::VSTRD;
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  SrcReg = constrainOperandRegClass(TII.get(StrOpc), SrcReg, 0);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(StrOpc))
                                .addReg(SrcReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
  return true;
}
bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  const Value *PtrV = I->getOperand(1);
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
      if (Arg->hasSwiftErrorAttr())
        return false;
    }

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
      if (Alloca->isSwiftError())
        return false;
    }
  }

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isTypeLegal(Op0->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
    return false;
  return true;
}
bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {
      // Get the compare predicate, taking advantage of fall-through.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      ARMCC::CondCodes ARMPred = getComparePred(Predicate);

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      // Emit the compare.
      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned(),
                      CI->isEquality()))
        return false;

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
          .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      finishCondBranch(BI->getParent(), TBB, FBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
      unsigned OpReg = getRegForValue(TI->getOperand(0));
      OpReg = constrainOperandRegClass(TII.get(TstOpc), OpReg, 0);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(TstOpc))
                          .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
          .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      finishCondBranch(BI->getParent(), TBB, FBB);
      return true;
    }
  } else if (const ConstantInt *CI =
                 dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    fastEmitBranch(Target, DbgLoc);
    return true;
  }

  // The compare lives in another block (ours was split), so the condition is
  // only available as a one-bit value in a virtual register; test that bit.
  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  CmpReg = constrainOperandRegClass(TII.get(TstOpc), CmpReg, 0);
  AddOptionalDefs(
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc))
          .addReg(CmpReg).addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
      .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  finishCondBranch(BI->getParent(), TBB, FBB);
  return true;
}
bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
  unsigned AddrReg = getRegForValue(I->getOperand(0));
  if (AddrReg == 0) return false;

  unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
  assert(isThumb2 || Subtarget->hasV4TOps());

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc)).addReg(AddrReg));

  const IndirectBrInst *IB = cast<IndirectBrInst>(I);
  for (const BasicBlock *SuccBB : IB->successors())
    FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[SuccBB]);

  return true;
}
bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt, bool isEquality) {
  Type *Ty = Src1Value->getType();
  EVT SrcEVT = TLI.getValueType(DL, Ty, true);
  if (!SrcEVT.isSimple()) return false;
  MVT SrcVT = SrcEVT.getSimpleVT();

  if (Ty->isFloatTy() && !Subtarget->hasVFP2())
    return false;

  if (Ty->isDoubleTy() && (!Subtarget->hasVFP2() || Subtarget->isFPOnlySP()))
    return false;

  // Check to see if the 2nd operand is a constant that we can encode directly
  // in the compare.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
    if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
        SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();
      Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
      // For INT_MIN (0x80000000) we must use CMP rather than CMN, because
      // 2147483648 is not representable as a signed 32-bit int.
      if (Imm < 0 && Imm != (int)0x80000000) {
        isNegativeImm = true;
        Imm = -Imm;
      }
      UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
        (ARM_AM::getSOImmVal(Imm) != -1);
    }
  } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
    if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
      if (ConstFP->isZero() && !ConstFP->isNegative())
        UseImm = true;
  }

  unsigned CmpOpc;
  bool isICmp = true;
  bool needsExt = false;
  switch (SrcVT.SimpleTy) {
    default: return false;
    case MVT::f32:
      isICmp = false;
      // Equality comparisons shouldn't raise Invalid on unordered inputs.
      if (isEquality)
        CmpOpc = UseImm ? ARM::VCMPZS : ARM::VCMPS;
      else
        CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
      break;
    case MVT::f64:
      isICmp = false;
      if (isEquality)
        CmpOpc = UseImm ? ARM::VCMPZD : ARM::VCMPD;
      else
        CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
      break;
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
      needsExt = true;
      LLVM_FALLTHROUGH;
    case MVT::i32:
      if (isThumb2) {
        if (!UseImm)
          CmpOpc = ARM::t2CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
      } else {
        if (!UseImm)
          CmpOpc = ARM::CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
      }
      break;
  }

  unsigned SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0) return false;

  unsigned SrcReg2 = 0;
  if (!UseImm) {
    SrcReg2 = getRegForValue(Src2Value);
    if (SrcReg2 == 0) return false;
  }

  // We have i1, i8, or i16; we need to either zero-extend or sign-extend.
  if (needsExt) {
    SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
    if (SrcReg1 == 0) return false;
    if (!UseImm) {
      SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
      if (SrcReg2 == 0) return false;
    }
  }

  const MCInstrDesc &II = TII.get(CmpOpc);
  SrcReg1 = constrainOperandRegClass(II, SrcReg1, 0);
  if (!UseImm) {
    SrcReg2 = constrainOperandRegClass(II, SrcReg2, 1);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                        .addReg(SrcReg1).addReg(SrcReg2));
  } else {
    MachineInstrBuilder MIB;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
              .addReg(SrcReg1);

    // Only add immediate for icmp; the immediate for fcmp is an implicit 0.0.
    if (isICmp)
      MIB.addImm(Imm);
    AddOptionalDefs(MIB);
  }

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (Ty->isFloatTy() || Ty->isDoubleTy())
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::FMSTAT)));
  return true;
}
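// Integer compares (CMP/CMN) set CPSR directly; VFP compares set FPSCR, so
// the FMSTAT above transfers the flags into CPSR before any conditional
// branch or conditional move reads them.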
bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  // Emit the compare.
  if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned(),
                  CI->isEquality()))
    return false;

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
  const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
                                           : &ARM::GPRRegClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = fastMaterializeConstant(Zero);
  // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc), DestReg)
      .addReg(ZeroReg).addImm(1)
      .addImm(ARMPred).addReg(ARM::CPSR);

  updateValueMap(I, DestReg);
  return true;
}
bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2() || Subtarget->isFPOnlySP()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(&ARM::DPRRegClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VCVTDS), Result)
                      .addReg(Op));
  updateValueMap(I, Result);
  return true;
}
bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2() || Subtarget->isFPOnlySP()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(&ARM::SPRRegClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VCVTSD), Result)
                      .addReg(Op));
  updateValueMap(I, Result);
  return true;
}
bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  Value *Src = I->getOperand(0);
  EVT SrcEVT = TLI.getValueType(DL, Src->getType(), true);
  if (!SrcEVT.isSimple())
    return false;
  MVT SrcVT = SrcEVT.getSimpleVT();
  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (SrcReg == 0) return false;

  // Handle sign-extension.
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
    SrcReg = ARMEmitIntExt(SrcVT, SrcReg, MVT::i32,
                           /*isZExt*/!isSigned);
    if (SrcReg == 0) return false;
  }

  // The conversion routine works on fp-reg to fp-reg and the operand above
  // was an integer, so move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
  else if (Ty->isDoubleTy() && !Subtarget->isFPOnlySP())
    Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc), ResultReg).addReg(FP));
  updateValueMap(I, ResultReg);
  return true;
}
bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
  else if (OpTy->isDoubleTy() && !Subtarget->isFPOnlySP())
    Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
  else return false;

  // The conversion result lands in an f32 register.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc), ResultReg).addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  updateValueMap(I, IntReg);
  return true;
}
bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;

  // Check to see if we can use an immediate in the conditional move.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
    assert(VT == MVT::i32 && "Expecting an i32.");
    Imm = (int)ConstInt->getValue().getZExtValue();
    if (Imm < 0) {
      isNegativeImm = true;
      Imm = ~Imm;
    }
    UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
  }

  unsigned Op2Reg = 0;
  if (!UseImm) {
    Op2Reg = getRegForValue(I->getOperand(2));
    if (Op2Reg == 0) return false;
  }

  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  CondReg = constrainOperandRegClass(TII.get(TstOpc), CondReg, 0);
  AddOptionalDefs(
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc))
          .addReg(CondReg)
          .addImm(1));

  unsigned MovCCOpc;
  const TargetRegisterClass *RC;
  if (!UseImm) {
    RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
    MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
  } else {
    RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
    if (!isNegativeImm)
      MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
    else
      MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
  }
  unsigned ResultReg = createResultReg(RC);
  if (!UseImm) {
    Op2Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op2Reg, 1);
    Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc),
            ResultReg)
        .addReg(Op2Reg)
        .addReg(Op1Reg)
        .addImm(ARMCC::NE)
        .addReg(ARM::CPSR);
  } else {
    Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 1);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc),
            ResultReg)
        .addReg(Op1Reg)
        .addImm(Imm)
        .addImm(ARMCC::EQ)
        .addReg(ARM::CPSR);
  }
  updateValueMap(I, ResultReg);
  return true;
}
bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivideInThumbMode())
    return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}
bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
  EVT DestVT = TLI.getValueType(DL, I->getType(), true);

  // We can get here in the case when we have a binary operation on a non-legal
  // type and the target independent selector doesn't know how to handle it.
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned Opc;
  switch (ISDOpcode) {
    default: return false;
    case ISD::ADD:
      Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
      break;
    case ISD::OR:
      Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
      break;
    case ISD::SUB:
      Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
      break;
  }

  unsigned SrcReg1 = getRegForValue(I->getOperand(0));
  if (SrcReg1 == 0) return false;

  // TODO: Often the 2nd operand is an immediate, which can be encoded directly
  // in the instruction, rather than materializing the value in a register.
  unsigned SrcReg2 = getRegForValue(I->getOperand(1));
  if (SrcReg2 == 0) return false;

  unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
  SrcReg1 = constrainOperandRegClass(TII.get(Opc), SrcReg1, 1);
  SrcReg2 = constrainOperandRegClass(TII.get(Opc), SrcReg2, 2);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc), ResultReg)
                      .addReg(SrcReg1).addReg(SrcReg2));
  updateValueMap(I, ResultReg);
  return true;
}
bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
  EVT FPVT = TLI.getValueType(DL, I->getType(), true);
  if (!FPVT.isSimple()) return false;
  MVT VT = FPVT.getSimpleVT();

  // FIXME: Support vector types where possible.
  if (VT.isVector())
    return false;

  // We can get here when we want to use NEON for our fp operations but can't
  // figure out how to. Just use the VFP instructions if we have them.
  Type *Ty = I->getType();
  if (Ty->isFloatTy() && !Subtarget->hasVFP2())
    return false;
  if (Ty->isDoubleTy() && (!Subtarget->hasVFP2() || Subtarget->isFPOnlySP()))
    return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
    default: return false;
    case ISD::FADD:
      Opc = is64bit ? ARM::VADDD : ARM::VADDS;
      break;
    case ISD::FSUB:
      Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
      break;
    case ISD::FMUL:
      Opc = is64bit ? ARM::VMULD : ARM::VMULS;
      break;
  }
  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc), ResultReg)
                      .addReg(Op1).addReg(Op2));
  updateValueMap(I, ResultReg);
  return true;
}
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
                                           bool Return,
                                           bool isVarArg) {
  switch (CC) {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::Fast:
    if (Subtarget->hasVFP2() && !isVarArg) {
      if (!Subtarget->isAAPCS_ABI())
        return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
      // For AAPCS ABI targets, just use the VFP variant of the convention.
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    }
    LLVM_FALLTHROUGH;
  case CallingConv::C:
  case CallingConv::CXX_FAST_TLS:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          TM.Options.FloatABIType == FloatABI::Hard && !isVarArg)
        return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
    } else {
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
    }
  case CallingConv::ARM_AAPCS_VFP:
  case CallingConv::Swift:
    if (!isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    // Fall through to soft float variant; variadic functions don't use the
    // hard floating point ABI.
    LLVM_FALLTHROUGH;
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::GHC:
    return CC_ARM_APCS_GHC;
  }
}
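// The RetCC_*/CC_* functions returned here are generated from the calling
// convention descriptions in ARMGenCallingConv.inc; ProcessCallArgs and
// FinishCall hand them to a CCState, which assigns every argument and return
// value a register or a stack offset.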
bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC, unsigned &NumBytes,
                                  bool isVarArg) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, *FuncInfo.MF, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags,
                             CCAssignFnForCall(CC, false, isVarArg));

  // Check that we can handle all of the arguments. If we can't, then bail out
  // now before we add code to the MBB.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    if (VA.needsCustom()) {
      // Custom lowering only covers an f64 split across two GPRs.
      if (VA.getLocVT() != MVT::f64 ||
          !VA.isRegLoc() || !ArgLocs[++i].isRegLoc())
        return false;
    } else if ((ArgVT == MVT::f32 || ArgVT == MVT::f64) &&
               !Subtarget->hasVFP2()) {
      return false;
    }
  }

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START.
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(AdjStackDown))
                      .addImm(NumBytes).addImm(0));

  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    const Value *ArgVal = Args[VA.getValNo()];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) &&
           "We don't handle NEON/vector parameters yet.");

    // Handle arg promotion: SExt, ZExt, AExt.
    switch (VA.getLocInfo()) {
      case CCValAssign::Full: break;
      case CCValAssign::SExt: {
        MVT DestVT = VA.getLocVT();
        Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt=*/false);
        assert(Arg != 0 && "Failed to emit a sext");
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::AExt:
      // Intentional fall-through. Handle AExt and ZExt.
      case CCValAssign::ZExt: {
        MVT DestVT = VA.getLocVT();
        Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt=*/true);
        assert(Arg != 0 && "Failed to emit a zext");
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::BCvt: {
        unsigned BC = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
                                 /*Kill=*/false);
        assert(BC != 0 && "Failed to emit a bitcast!");
        Arg = BC;
        ArgVT = VA.getLocVT();
        break;
      }
      default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::f64 &&
             "Custom lowering for v2f64 args not available");
      CCValAssign &NextVA = ArgLocs[++i];
      assert(VA.isRegLoc() && NextVA.isRegLoc() &&
             "We only handle register args!");
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                          .addReg(NextVA.getLocReg(), RegState::Define)
                          .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack; don't emit stores for undef values.
      if (isa<UndefValue>(ArgVal))
        continue;

      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet;
      assert(EmitRet && "Could not emit a store for argument!");
    }
  }

  return true;
}
bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes, bool isVarArg) {
  // Issue CALLSEQ_END.
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(AdjStackUp))
                      .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
      MVT DestVT = RVLocs[0].getValVT();
      const TargetRegisterClass *DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(ARM::VMOVDRR), ResultReg)
                          .addReg(RVLocs[0].getLocReg())
                          .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      updateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 &&
             "Can't handle non-double multi-reg retvals!");
      MVT CopyVT = RVLocs[0].getValVT();

      // Special handling for extended integers.
      if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
        CopyVT = MVT::i32;

      const TargetRegisterClass *DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      updateValueMap(I, ResultReg);
    }
  }

  return true;
}
bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (TLI.supportSwiftError() &&
      F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return false;

  if (TLI.supportSplitCSR(FuncInfo.MF))
    return false;

  // Build a list of return value registers.
  SmallVector<unsigned, 4> RetRegs;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, /*Return*/true,
                                                 F.isVarArg()));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Only handle full-value register returns for now.
    if (VA.getLocInfo() != CCValAssign::Full || !VA.isRegLoc())
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    EVT RVEVT = TLI.getValueType(DL, RV->getType());
    if (!RVEVT.isSimple()) return false;
    MVT RVVT = RVEVT.getSimpleVT();
    MVT DestVT = VA.getValVT();
    // Special handling for extended integers.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      assert(DestVT == MVT::i32 && "ARM should always ext to i32");

      // Perform extension if flagged as either zext or sext. Otherwise, do
      // nothing.
      if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
        SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt());
        if (SrcReg == 0) return false;
      }
    }

    // Make the copy.
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);

    // Add register to return instruction.
    RetRegs.push_back(VA.getLocReg());
  }

  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(Subtarget->getReturnOpcode()));
  AddOptionalDefs(MIB);
  for (unsigned R : RetRegs)
    MIB.addReg(R, RegState::Implicit);
  return true;
}
unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {
  if (UseReg)
    return isThumb2 ? ARM::tBLXr : ARM::BLX;
  else
    return isThumb2 ? ARM::tBL : ARM::BL;
}

unsigned ARMFastISel::getLibcallReg(const Twine &Name) {
  // Manually compute the global's type to avoid building it when unnecessary.
  Type *GVTy = Type::getInt32PtrTy(*Context, /*AS=*/0);
  EVT LCREVT = TLI.getValueType(DL, GVTy);
  if (!LCREVT.isSimple()) return 0;

  GlobalValue *GV = new GlobalVariable(M, Type::getInt32Ty(*Context), false,
                                       GlobalValue::ExternalLinkage, nullptr,
                                       Name);
  assert(GV->getType() == GVTy && "We miscomputed the type for the global!");
  return ARMMaterializeGV(GV, LCREVT.getSimpleVT());
}
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // Can't handle non-double multi-reg retvals.
  if (RetVT != MVT::isVoid && RetVT != MVT::i32) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)
      return false;
  }

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (Value *Op : I->operands()) {
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, false))
    return false;

  unsigned CalleeReg = 0;
  if (Subtarget->genLongCalls()) {
    CalleeReg = getLibcallReg(TLI.getLibcallName(Call));
    if (CalleeReg == 0) return false;
  }

  // Issue the call.
  unsigned CallOpc = ARMSelectCallOp(Subtarget->genLongCalls());
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    DbgLoc, TII.get(CallOpc));
  // BL / BLX don't take a predicate, but tBL / tBLX do.
  if (isThumb2)
    MIB.add(predOps(ARMCC::AL));
  if (Subtarget->genLongCalls())
    MIB.addReg(CalleeReg);
  else
    MIB.addExternalSymbol(TLI.getLibcallName(Call));

  // Add implicit physical register uses to the call.
  for (unsigned R : RegArgs)
    MIB.addReg(R, RegState::Implicit);

  // Add a register mask with the call-preserved registers.
  MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));

  // Finish off the call, including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}
bool ARMFastISel::SelectCall(const Instruction *I,
                             const char *IntrMemName = nullptr) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm.
  if (isa<InlineAsm>(Callee)) return false;

  // Allow SelectionDAG isel to handle tail calls.
  if (CI->isTailCall()) return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  FunctionType *FTy = CS.getFunctionType();
  bool isVarArg = FTy->isVarArg();

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
           RetVT != MVT::i8 && RetVT != MVT::i1)
    return false;

  // Can't handle non-double multi-reg retvals.
  if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 &&
      RetVT != MVT::i16 && RetVT != MVT::i32) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)
      return false;
  }

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  unsigned arg_size = CS.arg_size();
  Args.reserve(arg_size);
  ArgRegs.reserve(arg_size);
  ArgVTs.reserve(arg_size);
  ArgFlags.reserve(arg_size);
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    // If we're lowering a memory intrinsic instead of a regular call, skip the
    // last argument, which shouldn't be passed to the underlying function.
    if (IntrMemName && e - i <= 1)
      break;

    ISD::ArgFlagsTy Flags;
    unsigned ArgIdx = i - CS.arg_begin();
    if (CS.paramHasAttr(ArgIdx, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(ArgIdx, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(ArgIdx, Attribute::InReg) ||
        CS.paramHasAttr(ArgIdx, Attribute::StructRet) ||
        CS.paramHasAttr(ArgIdx, Attribute::SwiftSelf) ||
        CS.paramHasAttr(ArgIdx, Attribute::SwiftError) ||
        CS.paramHasAttr(ArgIdx, Attribute::Nest) ||
        CS.paramHasAttr(ArgIdx, Attribute::ByVal))
      return false;

    Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
        ArgVT != MVT::i1)
      return false;

    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;

    unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, isVarArg))
    return false;

  bool UseReg = false;
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV || Subtarget->genLongCalls()) UseReg = true;

  unsigned CalleeReg = 0;
  if (UseReg) {
    if (IntrMemName)
      CalleeReg = getLibcallReg(IntrMemName);
    else
      CalleeReg = getRegForValue(Callee);

    if (CalleeReg == 0) return false;
  }

  // Issue the call.
  unsigned CallOpc = ARMSelectCallOp(UseReg);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    DbgLoc, TII.get(CallOpc));

  // ARM calls don't take a predicate, but tBL / tBLX do.
  if (isThumb2)
    MIB.add(predOps(ARMCC::AL));
  if (UseReg)
    MIB.addReg(CalleeReg);
  else if (!IntrMemName)
    MIB.addGlobalAddress(GV, 0, 0);
  else
    MIB.addExternalSymbol(IntrMemName, 0);

  // Add implicit physical register uses to the call.
  for (unsigned R : RegArgs)
    MIB.addReg(R, RegState::Implicit);

  // Add a register mask with the call-preserved registers.
  MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));

  // Finish off the call, including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg))
    return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}
bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
  return Len <= 16;
}

bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
                                        uint64_t Len, unsigned Alignment) {
  // Make sure we don't bloat code by inlining very large memcpy's.
  if (!ARMIsMemCpySmall(Len))
    return false;

  while (Len) {
    MVT VT;
    if (!Alignment || Alignment >= 4) {
      if (Len >= 4)
        VT = MVT::i32;
      else if (Len >= 2)
        VT = MVT::i16;
      else {
        assert(Len == 1 && "Expected a length of 1!");
        VT = MVT::i8;
      }
    } else {
      // Bound based on alignment.
      if (Len >= 2 && Alignment == 2)
        VT = MVT::i16;
      else
        VT = MVT::i8;
    }

    bool RV;
    unsigned ResultReg;
    RV = ARMEmitLoad(VT, ResultReg, Src);
    assert(RV && "Should be able to handle this load.");
    RV = ARMEmitStore(VT, ResultReg, Dest);
    assert(RV && "Should be able to handle this store.");
    (void)RV;

    unsigned Size = VT.getSizeInBits() / 8;
    Len -= Size;
    Dest.Offset += Size;
    Src.Offset += Size;
  }

  return true;
}
bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
  // FIXME: Handle more intrinsics.
  switch (I.getIntrinsicID()) {
  default: return false;
  case Intrinsic::frameaddress: {
    MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
    MFI.setFrameAddressIsTaken(true);

    unsigned LdrOpc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
    const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
                                             : &ARM::GPRRegClass;

    const ARMBaseRegisterInfo *RegInfo =
        static_cast<const ARMBaseRegisterInfo *>(Subtarget->getRegisterInfo());
    unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
    unsigned SrcReg = FramePtr;

    // Recursively load the frame address: one ldr per requested depth level.
    unsigned DestReg;
    unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();
    while (Depth--) {
      DestReg = createResultReg(RC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(LdrOpc), DestReg)
                          .addReg(SrcReg).addImm(0));
      SrcReg = DestReg;
    }
    updateValueMap(&I, SrcReg);
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    const MemTransferInst &MTI = cast<MemTransferInst>(I);
    // Don't handle volatile.
    if (MTI.isVolatile())
      return false;

    // Only inline memcpy; we don't currently handle inline memmove.
    bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
    if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
      // Small memcpys are common enough that we want to do them without a
      // call if possible.
      uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
      if (ARMIsMemCpySmall(Len)) {
        Address Dest, Src;
        if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
            !ARMComputeAddress(MTI.getRawSource(), Src))
          return false;
        unsigned Alignment = MinAlign(MTI.getDestAlignment(),
                                      MTI.getSourceAlignment());
        if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment))
          return true;
      }
    }

    if (!MTI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
      return false;

    const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
    return SelectCall(&I, IntrMemName);
  }
  case Intrinsic::memset: {
    const MemSetInst &MSI = cast<MemSetInst>(I);
    // Don't handle volatile.
    if (MSI.isVolatile())
      return false;

    if (!MSI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MSI.getDestAddressSpace() > 255)
      return false;

    return SelectCall(&I, "memset");
  }
  case Intrinsic::trap:
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(
        Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP));
    return true;
  }
}
bool ARMFastISel::SelectTrunc(const Instruction *I) {
  // The high bits for a type smaller than the register size are assumed to
  // be undefined.
  Value *Op = I->getOperand(0);

  EVT SrcVT = TLI.getValueType(DL, Op->getType(), true);
  EVT DestVT = TLI.getValueType(DL, I->getType(), true);

  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg) return false;

  // Because the high bits are undefined, a truncate generates no code.
  updateValueMap(I, SrcReg);
  return true;
}
unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                    bool isZExt) {
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
    return 0;
  if (SrcVT != MVT::i16 && SrcVT != MVT::i8 && SrcVT != MVT::i1)
    return 0;

  // Which combinations can be emitted as a single instruction, indexed by
  // { Source Size, isThumb2, hasV6Ops, isZExt }.
  static const uint8_t isSingleInstrTbl[3][2][2][2] = {
    //            ARM                     Thumb
    //  !hasV6Ops hasV6Ops     !hasV6Ops hasV6Ops
    { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } },  // i1
    { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } },  // i8
    { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }   // i16
  };

  // Target register classes, indexed by isThumb and one-vs-two instructions.
  static const TargetRegisterClass *RCTbl[2][2] = {
    // Instructions: Two                   Single
    /* ARM   */ { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass },
    /* Thumb */ { &ARM::tGPRRegClass,    &ARM::rGPRRegClass    }
  };

  // Table governing the instruction(s) to be emitted.
  static const struct InstructionTable {
    uint32_t Opc   : 16;
    uint32_t hasS  :  1; // Some instructions have an S bit, always set to 0.
    uint32_t Shift :  7; // For shift-operand addressing mode (MOVsi only).
    uint32_t Imm   :  8; // Every instruction has either a shift or a mask.
  } IT[2][2][3][2] = { /* opcode table elided in this excerpt */ };

  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DestVT.getSizeInBits();
  (void)DestBits;
  assert((SrcBits < DestBits) && "can only extend to larger types");
  assert((DestBits == 32 || DestBits == 16 || DestBits == 8) &&
         "other sizes unimplemented");
  assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) &&
         "other sizes unimplemented");

  bool hasV6Ops = Subtarget->hasV6Ops();
  unsigned Bitness = SrcBits / 8;  // {1,8,16} => {0,1,2}
  assert((Bitness < 3) && "sanity-check table bounds");

  bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt];
  const TargetRegisterClass *RC = RCTbl[isThumb2][isSingleInstr];
  const InstructionTable *ITP = &IT[isSingleInstr][isThumb2][Bitness][isZExt];
  unsigned Opc = ITP->Opc;
  assert(ARM::KILL != Opc && "Invalid table entry");

  unsigned hasS = ITP->hasS;
  ARM_AM::ShiftOpc Shift = (ARM_AM::ShiftOpc)ITP->Shift;
  assert(((Shift == ARM_AM::no_shift) == (Opc != ARM::MOVsi)) &&
         "only MOVsi has shift operand addressing mode");
  unsigned Imm = ITP->Imm;
  bool ImmIsSO = (Shift != ARM_AM::no_shift);

  // 16-bit Thumb instructions always set CPSR (unless in an IT block).
  bool setsCPSR = &ARM::tGPRRegClass == RC;
  unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::MOVsi;
  unsigned ResultReg;

  // Either one or two instructions are emitted, always of the form
  //   dst = in OP imm
  // with predicate operands added last and, in some cases, an extra S bit.
  unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2;
  for (unsigned Instr = 0; Instr != NumInstrsEmitted; ++Instr) {
    ResultReg = createResultReg(RC);
    bool isLsl = (0 == Instr) && !isSingleInstr;
    unsigned Opcode = isLsl ? LSLOpc : Opc;
    ARM_AM::ShiftOpc ShiftAM = isLsl ? ARM_AM::lsl : Shift;
    unsigned ImmEnc = ImmIsSO ? ARM_AM::getSORegOpc(ShiftAM, Imm) : Imm;
    bool isKill = 1 == Instr;
    MachineInstrBuilder MIB = BuildMI(
        *FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opcode), ResultReg);
    if (setsCPSR)
      MIB.addReg(ARM::CPSR, RegState::Define);
    SrcReg = constrainOperandRegClass(TII.get(Opcode), SrcReg, 1 + setsCPSR);
    MIB.addReg(SrcReg, isKill * RegState::Kill)
        .addImm(ImmEnc)
        .add(predOps(ARMCC::AL));
    if (hasS)
      MIB.add(condCodeOp());
    // The second instruction consumes the first's result.
    SrcReg = ResultReg;
  }

  return ResultReg;
}
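// On targets without the UXTB/UXTH/SXTB/SXTH extends (per isSingleInstrTbl),
// the extension is emitted as two instructions: a left shift followed by a
// logical or arithmetic right shift. Newer targets use one extend
// instruction, or a single AND with 255 for i8 zero-extension.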
bool ARMFastISel::SelectIntExt(const Instruction *I) {
  // On ARM, in general, integer casts don't involve legal types; this code
  // handles promotable integers.
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg) return false;

  EVT SrcEVT, DestEVT;
  SrcEVT = TLI.getValueType(DL, SrcTy, true);
  DestEVT = TLI.getValueType(DL, DestTy, true);
  if (!SrcEVT.isSimple()) return false;
  if (!DestEVT.isSimple()) return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DestVT = DestEVT.getSimpleVT();
  unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0) return false;
  updateValueMap(I, ResultReg);
  return true;
}
bool ARMFastISel::SelectShift(const Instruction *I,
                              ARM_AM::ShiftOpc ShiftTy) {
  // Thumb2 mode is handled by the target-independent selector or DAG ISel.
  if (isThumb2)
    return false;

  // Only handle i32 now.
  EVT DestVT = TLI.getValueType(DL, I->getType(), true);
  if (DestVT != MVT::i32)
    return false;

  unsigned Opc = ARM::MOVsr;
  unsigned ShiftImm;
  Value *Src2Value = I->getOperand(1);
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) {
    ShiftImm = CI->getZExtValue();

    // Fall back to selection DAG isel if the shift amount is zero or greater
    // than the width of the value type.
    if (ShiftImm == 0 || ShiftImm >= 32)
      return false;

    Opc = ARM::MOVsi;
  }

  Value *Src1Value = I->getOperand(0);
  unsigned Reg1 = getRegForValue(Src1Value);
  if (Reg1 == 0) return false;

  unsigned Reg2 = 0;
  if (Opc == ARM::MOVsr) {
    Reg2 = getRegForValue(Src2Value);
    if (Reg2 == 0) return false;
  }

  unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
  if (ResultReg == 0) return false;

  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(Opc), ResultReg)
                                .addReg(Reg1);

  if (Opc == ARM::MOVsi)
    MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm));
  else if (Opc == ARM::MOVsr) {
    MIB.addReg(Reg2);
    MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0));
  }

  AddOptionalDefs(MIB);
  updateValueMap(I, ResultReg);
  return true;
}
bool ARMFastISel::fastSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::IndirectBr:
      return SelectIndirectBr(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectIToFP(I, /*isSigned*/ true);
    case Instruction::UIToFP:
      return SelectIToFP(I, /*isSigned*/ false);
    case Instruction::FPToSI:
      return SelectFPToI(I, /*isSigned*/ true);
    case Instruction::FPToUI:
      return SelectFPToI(I, /*isSigned*/ false);
    case Instruction::Add:
      return SelectBinaryIntOp(I, ISD::ADD);
    case Instruction::Or:
      return SelectBinaryIntOp(I, ISD::OR);
    case Instruction::Sub:
      return SelectBinaryIntOp(I, ISD::SUB);
    case Instruction::FAdd:
      return SelectBinaryFPOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryFPOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryFPOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectDiv(I, /*isSigned*/ true);
    case Instruction::UDiv:
      return SelectDiv(I, /*isSigned*/ false);
    case Instruction::SRem:
      return SelectRem(I, /*isSigned*/ true);
    case Instruction::URem:
      return SelectRem(I, /*isSigned*/ false);
    case Instruction::Call:
      if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
        return SelectIntrinsicCall(*II);
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    case Instruction::Trunc:
      return SelectTrunc(I);
    case Instruction::ZExt:
    case Instruction::SExt:
      return SelectIntExt(I);
    case Instruction::Shl:
      return SelectShift(I, ARM_AM::lsl);
    case Instruction::LShr:
      return SelectShift(I, ARM_AM::lsr);
    case Instruction::AShr:
      return SelectShift(I, ARM_AM::asr);
    default: break;
  }
  return false;
}
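// Anything not handled above (or any Select* routine returning false) is
// punted back to the generic tablegen'd FastISel tables and, failing that,
// to SelectionDAG, so returning false here is always safe.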
namespace {

// Sign- and zero-extend instructions which can be folded into a preceding
// load; each entry records the immediate (mask or shift) expected on the
// extend instruction.
struct FoldableLoadExtendsStruct {
  uint16_t Opc[2];  // ARM, Thumb.
  uint8_t ExpectedImm;
  uint8_t isZExt     : 1;
  uint8_t ExpectedVT : 7;
};

} // end anonymous namespace

static const FoldableLoadExtendsStruct FoldableLoadExtends[] = {
  { { ARM::UXTH,  ARM::t2UXTH  },   0, 1, MVT::i16 },
  { { ARM::ANDri, ARM::t2ANDri }, 255, 1, MVT::i8  },
  { { ARM::SXTB,  ARM::t2SXTB  },   0, 0, MVT::i8  },
  { { ARM::UXTB,  ARM::t2UXTB  },   0, 1, MVT::i8  }
};
/// The specified machine instr operand is a vreg, and that vreg is being
/// provided by the specified load instruction. If possible, try to fold the
/// load as an operand to the instruction, returning true if successful.
bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                                      const LoadInst *LI) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(LI->getType(), VT))
    return false;

  // Combine load followed by zero- or sign-extend.
  if (MI->getNumOperands() < 3 || !MI->getOperand(2).isImm())
    return false;
  const uint64_t Imm = MI->getOperand(2).getImm();

  bool Found = false;
  bool isZExt;
  for (const FoldableLoadExtendsStruct &FLE : FoldableLoadExtends) {
    if (FLE.Opc[isThumb2] == MI->getOpcode() &&
        (uint64_t)FLE.ExpectedImm == Imm &&
        MVT((MVT::SimpleValueType)FLE.ExpectedVT) == VT) {
      Found = true;
      isZExt = FLE.isZExt;
    }
  }
  if (!Found) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;

  unsigned ResultReg = MI->getOperand(0).getReg();
  if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false))
    return false;
  MachineBasicBlock::iterator I(MI);
  removeDeadCode(I, std::next(I));
  return true;
}
unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV,
                                     unsigned Align, MVT VT) {
  bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);

  LLVMContext *Context = &MF->getFunction().getContext();
  unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
  unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
  ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(
      GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj,
      UseGOT_PREL ? ARMCP::GOT_PREL : ARMCP::no_modifier,
      /*AddCurrentAddress=*/UseGOT_PREL);

  unsigned ConstAlign =
      MF->getDataLayout().getPrefTypeAlignment(Type::getInt32PtrTy(*Context));
  unsigned Idx = MF->getConstantPool()->getConstantPoolIndex(CPV, ConstAlign);
  MachineMemOperand *CPMMO =
      MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
                               MachineMemOperand::MOLoad, 4, 4);

  unsigned TempReg = MF->getRegInfo().createVirtualRegister(&ARM::rGPRRegClass);
  unsigned Opc = isThumb2 ? ARM::t2LDRpci : ARM::LDRcp;
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), TempReg)
          .addConstantPoolIndex(Idx)
          .addMemOperand(CPMMO);
  if (Opc == ARM::LDRcp)
    MIB.addImm(0);
  MIB.add(predOps(ARMCC::AL));

  // Fix the address by adding pc.
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  Opc = Subtarget->isThumb() ? ARM::tPICADD : UseGOT_PREL ? ARM::PICLDR
                                                          : ARM::PICADD;
  DestReg = constrainOperandRegClass(TII.get(Opc), DestReg, 0);
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
            .addReg(TempReg)
            .addImm(ARMPCLabelIndex);

  if (!Subtarget->isThumb())
    MIB.add(predOps(ARMCC::AL));

  if (UseGOT_PREL && Subtarget->isThumb()) {
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                  TII.get(ARM::t2LDRi12), NewDestReg)
              .addReg(DestReg)
              .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }
  return DestReg;
}
bool ARMFastISel::fastLowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    return false;

  const Function *F = FuncInfo.Fn;
  if (F->isVarArg())
    return false;

  CallingConv::ID CC = F->getCallingConv();
  switch (CC) {
  default:
    return false;
  case CallingConv::Fast:
  case CallingConv::C:
  case CallingConv::ARM_AAPCS_VFP:
  case CallingConv::ARM_AAPCS:
  case CallingConv::ARM_APCS:
  case CallingConv::Swift:
    break;
  }

  // Only handle simple cases: up to four i8/i16/i32 scalar arguments, all
  // passed in r0 - r3.
  for (const Argument &Arg : F->args()) {
    if (Arg.getArgNo() >= 4)
      return false;

    if (Arg.hasAttribute(Attribute::InReg) ||
        Arg.hasAttribute(Attribute::StructRet) ||
        Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::ByVal))
      return false;

    Type *ArgTy = Arg.getType();
    if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())
      return false;

    EVT ArgVT = TLI.getValueType(DL, ArgTy);
    if (!ArgVT.isSimple()) return false;
    switch (ArgVT.getSimpleVT().SimpleTy) {
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      break;
    default:
      return false;
    }
  }

  static const MCPhysReg GPRArgRegs[] = {
    ARM::R0, ARM::R1, ARM::R2, ARM::R3
  };

  const TargetRegisterClass *RC = &ARM::rGPRRegClass;
  for (const Argument &Arg : F->args()) {
    unsigned ArgNo = Arg.getArgNo();
    unsigned SrcReg = GPRArgRegs[ArgNo];
    unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
    // Emit a copy from the livein vreg so EmitLiveInCopies can't eliminate
    // the livein when its only use is a bitcast.
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY),
            ResultReg).addReg(DstReg, getKillRegState(true));
    updateValueMap(&Arg, ResultReg);
  }

  return true;
}
FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo,
                              const TargetLibraryInfo *libInfo) {
  if (funcInfo.MF->getSubtarget<ARMSubtarget>().useFastISel())
    return new ARMFastISel(funcInfo, libInfo);

  return nullptr;
}
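// The ARM backend's TargetLowering hook calls this factory when fast
// instruction selection is requested (typically at -O0); returning nullptr
// makes the caller fall back to SelectionDAG. The exact conditions checked
// by ARMSubtarget::useFastISel() are not shown in this excerpt.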