72 #define DEBUG_TYPE "instcombine" 74 STATISTIC(NumSimplified,
"Number of library calls simplified");
77 "instcombine-guard-widening-window",
79 cl::desc(
"How wide an instruction window to bypass looking for " 86 if (ITy->getBitWidth() < 32)
99 assert((isa<ConstantInt>(Elt) || isa<ConstantFP>(Elt)) &&
100 "Unexpected constant data vector element type");
102 ? cast<ConstantInt>(Elt)->isNegative()
103 : cast<ConstantFP>(Elt)->isNegative();
111 unsigned CopyDstAlign = MI->getDestAlignment();
112 if (CopyDstAlign < DstAlign){
113 MI->setDestAlignment(DstAlign);
118 unsigned CopySrcAlign = MI->getSourceAlignment();
119 if (CopySrcAlign < SrcAlign) {
120 MI->setSourceAlignment(SrcAlign);
127 if (!MemOpLength)
return nullptr;
134 assert(Size &&
"0-sized memory transferring should be removed already.");
136 if (Size > 8 || (Size&(Size-1)))
143 if (isa<AtomicMemTransferInst>(MI))
144 if (CopyDstAlign < Size || CopySrcAlign < Size)
149 cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
151 cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();
163 if (M->getNumOperands() == 3 && M->getOperand(0) &&
164 mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
165 mdconst::extract<ConstantInt>(M->getOperand(0))->
isZero() &&
167 mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
168 mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
170 M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
171 CopyMD = cast<MDNode>(M->getOperand(2));
174 Value *Src = Builder.CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
175 Value *Dest = Builder.CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
176 LoadInst *L = Builder.CreateLoad(Src);
181 MDNode *LoopMemParallelMD =
183 if (LoopMemParallelMD)
189 StoreInst *S = Builder.CreateStore(L, Dest);
194 if (LoopMemParallelMD)
199 if (
auto *MT = dyn_cast<MemTransferInst>(MI)) {
204 if (isa<AtomicMemTransferInst>(MI)) {
217 if (MI->getDestAlignment() < Alignment) {
218 MI->setDestAlignment(Alignment);
228 Alignment = MI->getDestAlignment();
229 assert(Len &&
"0-sized memory setting should be removed already.");
239 if (isa<AtomicMemSetInst>(MI))
247 Value *Dest = MI->getDest();
248 unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
250 Dest = Builder.CreateBitCast(Dest, NewDstPtrTy);
253 uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
257 if (isa<AtomicMemSetInst>(MI))
270 bool LogicalShift =
false;
271 bool ShiftLeft =
false;
293 LogicalShift =
false; ShiftLeft =
false;
313 LogicalShift =
true; ShiftLeft =
false;
333 LogicalShift =
true; ShiftLeft =
true;
336 assert((LogicalShift || !ShiftLeft) &&
"Only logical shifts can shift left");
343 if (!CAZ && !CDV && !CInt)
350 auto VT = cast<VectorType>(CDV->getType());
351 unsigned BitWidth = VT->getElementType()->getPrimitiveSizeInBits();
352 assert((64 % BitWidth) == 0 &&
"Unexpected packed shift size");
353 unsigned NumSubElts = 64 / BitWidth;
356 for (
unsigned i = 0; i != NumSubElts; ++i) {
357 unsigned SubEltIdx = (NumSubElts - 1) - i;
358 auto SubElt = cast<ConstantInt>(CDV->getElementAsConstant(SubEltIdx));
364 Count = CInt->getValue();
367 auto VT = cast<VectorType>(Vec->getType());
368 auto SVT = VT->getElementType();
369 unsigned VWidth = VT->getNumElements();
370 unsigned BitWidth = SVT->getPrimitiveSizeInBits();
377 if (Count.
uge(BitWidth)) {
383 Count =
APInt(64, BitWidth - 1);
404 bool LogicalShift =
false;
405 bool ShiftLeft =
false;
418 LogicalShift =
false;
446 assert((LogicalShift || !ShiftLeft) &&
"Only logical shifts can shift left");
454 auto VT = cast<VectorType>(II.
getType());
455 auto SVT = VT->getVectorElementType();
456 int NumElts = VT->getNumElements();
457 int BitWidth = SVT->getIntegerBitWidth();
461 bool AnyOutOfRange =
false;
463 for (
int I = 0;
I < NumElts; ++
I) {
464 auto *CElt = CShift->getAggregateElement(
I);
465 if (CElt && isa<UndefValue>(CElt)) {
470 auto *COp = dyn_cast_or_null<ConstantInt>(CElt);
477 APInt ShiftVal = COp->getValue();
478 if (ShiftVal.
uge(BitWidth)) {
479 AnyOutOfRange = LogicalShift;
480 ShiftAmts.
push_back(LogicalShift ? BitWidth : BitWidth - 1);
489 auto OutOfRange = [&](
int Idx) {
return (Idx < 0) || (BitWidth <= Idx); };
492 for (
int Idx : ShiftAmts) {
496 assert(LogicalShift &&
"Logical shift expected");
509 for (
int Idx : ShiftAmts) {
532 if (isa<UndefValue>(Arg0) && isa<UndefValue>(Arg1))
539 assert(NumDstElts == (2 * NumSrcElts) &&
"Unexpected packing types");
541 unsigned NumDstEltsPerLane = NumDstElts / NumLanes;
542 unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
545 "Unexpected packing types");
554 for (
unsigned Lane = 0; Lane != NumLanes; ++Lane) {
555 for (
unsigned Elt = 0; Elt != NumDstEltsPerLane; ++Elt) {
556 unsigned SrcIdx = Lane * NumSrcEltsPerLane + Elt % NumSrcEltsPerLane;
557 auto *Cst = (Elt >= NumSrcEltsPerLane) ? Cst1 : Cst0;
558 auto *COp = Cst->getAggregateElement(SrcIdx);
559 if (COp && isa<UndefValue>(COp)) {
564 auto *CInt = dyn_cast_or_null<ConstantInt>(COp);
568 APInt Val = CInt->getValue();
570 "Unexpected constant bitwidth");
577 Val = Val.
trunc(DstScalarSizeInBits);
586 if (Val.
isIntN(DstScalarSizeInBits))
587 Val = Val.
trunc(DstScalarSizeInBits);
632 SAE = Arg->getZExtValue();
635 if (SAE != 4 || (RoundControl != 2 && RoundControl != 1 ))
639 bool IsScalar =
false;
675 if (
auto *
C = dyn_cast<Constant>(Mask))
676 if (
C->isAllOnesValue())
682 if (MaskTy->getVectorNumElements() > Width) {
684 for (
unsigned i = 0; i != Width; ++i)
707 if (isa<UndefValue>(Arg))
714 if (
auto *
C = dyn_cast<Constant>(Arg)) {
718 auto *COp =
C->getAggregateElement(
I);
721 if (isa<UndefValue>(COp))
729 if ((CInt && CInt->isNegative()) || (CFp && CFp->isNegative()))
770 uint8_t Imm = CInt->getZExtValue();
771 uint8_t ZMask = Imm & 0xf;
772 uint8_t DestLane = (Imm >> 4) & 0x3;
773 uint8_t SourceLane = (Imm >> 6) & 0x3;
783 uint32_t ShuffleMask[4] = { 0, 1, 2, 3 };
792 (ZMask & (1 << DestLane))) {
796 ShuffleMask[DestLane] = SourceLane;
798 for (
unsigned i = 0; i < 4; ++i)
799 if ((ZMask >> i) & 0x1)
800 ShuffleMask[i] = i + 4;
807 ShuffleMask[DestLane] = SourceLane + 4;
818 auto LowConstantHighUndef = [&](uint64_t Val) {
832 if (CILength && CIIndex) {
842 unsigned Length = APLength == 0 ? 64 : APLength.
getZExtValue();
846 unsigned End = Index + Length;
857 if ((Length % 8) == 0 && (Index % 8) == 0) {
867 for (
int i = 0; i != (int)Length; ++i)
870 for (
int i = Length; i != 8; ++i)
873 for (
int i = 8; i != 16; ++i)
885 APInt Elt = CI0->getValue();
893 Value *
Args[] = {Op0, CILength, CIIndex};
901 if (CI0 && CI0->isZero())
902 return LowConstantHighUndef(0);
922 unsigned Length = APLength == 0 ? 64 : APLength.
getZExtValue();
926 unsigned End = Index + Length;
937 if ((Length % 8) == 0 && (Index % 8) == 0) {
947 for (
int i = 0; i != (int)Index; ++i)
949 for (
int i = 0; i != (int)Length; ++i)
952 for (
int i = Index + Length; i != 8; ++i)
954 for (
int i = 8; i != 16; ++i)
970 C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((
unsigned)0))
975 APInt V00 = CI00->getValue();
976 APInt V10 = CI10->getValue();
980 APInt Val = V00 | V10;
994 Value *
Args[] = {Op0, Op1, CILength, CIIndex};
1010 auto *VecTy = cast<VectorType>(II.
getType());
1012 unsigned NumElts = VecTy->getNumElements();
1013 assert((NumElts == 16 || NumElts == 32 || NumElts == 64) &&
1014 "Unexpected number of elements in shuffle mask!");
1021 for (
unsigned I = 0;
I < NumElts; ++
I) {
1023 if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
1026 if (isa<UndefValue>(COp)) {
1031 int8_t
Index = cast<ConstantInt>(COp)->getValue().getZExtValue();
1040 Index = ((Index < 0) ? NumElts : Index & 0x0F) + (
I & 0xF0);
1057 auto *VecTy = cast<VectorType>(II.
getType());
1060 bool IsPD = VecTy->getScalarType()->isDoubleTy();
1061 unsigned NumLaneElts = IsPD ? 2 : 4;
1062 assert(NumElts == 16 || NumElts == 8 || NumElts == 4 || NumElts == 2);
1068 for (
unsigned I = 0;
I < NumElts; ++
I) {
1070 if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
1073 if (isa<UndefValue>(COp)) {
1078 APInt Index = cast<ConstantInt>(COp)->getValue();
1089 Index +=
APInt(32, (
I / NumLaneElts) * NumLaneElts);
1107 auto *VecTy = cast<VectorType>(II.
getType());
1109 unsigned Size = VecTy->getNumElements();
1110 assert((Size == 4 || Size == 8 || Size == 16 || Size == 32 || Size == 64) &&
1111 "Unexpected shuffle mask size");
1116 for (
unsigned I = 0;
I <
Size; ++
I) {
1118 if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
1121 if (isa<UndefValue>(COp)) {
1141 if (
auto *CInt = dyn_cast<ConstantInt>(II.
getArgOperand(2))) {
1142 uint64_t Imm = CInt->getZExtValue() & 0x7;
1180 if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
1182 for (
unsigned I = 0,
E = ConstMask->getType()->getVectorNumElements();
I !=
E;
1184 if (
auto *MaskElt = ConstMask->getAggregateElement(
I))
1185 if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
1198 unsigned Alignment = cast<ConstantInt>(II.
getArgOperand(1))->getZExtValue();
1211 if (ConstMask->isNullValue())
1215 if (ConstMask->isAllOnesValue()) {
1217 unsigned Alignment = cast<ConstantInt>(II.
getArgOperand(2))->getZExtValue();
1227 if (ConstMask && ConstMask->isNullValue())
1246 if (StrippedArg == StrippedInvariantGroupsArg)
1249 Value *Result =
nullptr;
1257 "simplifyInvariantGroupIntrinsic only handles launder and strip");
1264 return cast<Instruction>(Result);
1270 if (ConstMask && ConstMask->isNullValue())
1279 "Expected cttz or ctlz intrinsic");
1295 if (PossibleZeros == DefiniteZeros) {
1329 "Expected ctpop intrinsic");
1336 unsigned BitWidth =
IT->getBitWidth();
1366 if (isa<ConstantAggregateZero>(Mask))
1378 unsigned AddrSpace = cast<PointerType>(Ptr->
getType())->getAddressSpace();
1402 if (isa<ConstantAggregateZero>(Mask)) {
1421 unsigned AddrSpace = cast<PointerType>(Ptr->
getType())->getAddressSpace();
1447 return maxnum(Src1, Src2);
1452 return maxnum(Src0, Src2);
1454 return maxnum(Src0, Src1);
1468 auto *VecTy = cast<VectorType>(II.
getType());
1469 unsigned NumElts = VecTy->getNumElements();
1472 if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
1477 for (
unsigned I = 0;
I < NumElts; ++
I) {
1480 if (!COp || !isa<ConstantInt>(COp))
1483 Indexes[
I] = cast<ConstantInt>(COp)->getLimitedValue();
1486 if (Indexes[
I] >= NumElts)
1509 MemAlign : IntrAlign->getLimitedValue();
1522 unsigned NumOperands) {
1525 for (
unsigned i = 0; i < NumOperands; i++)
1543 "Start intrinsic does not have expected ID");
1545 for (++BI; BI != BE; ++BI) {
1546 if (
auto *
E = dyn_cast<IntrinsicInst>(BI)) {
1547 if (isa<DbgInfoIntrinsic>(
E) ||
E->getIntrinsicID() == StartID)
1549 if (
E->getIntrinsicID() == EndID &&
1574 enum FtzRequirementTy {
1588 struct SimplifyAction {
1595 FtzRequirementTy FtzRequirement = FTZ_Any;
1597 SimplifyAction() =
default;
1600 : IID(IID), FtzRequirement(FtzReq) {}
1607 : BinaryOp(BinaryOp), FtzRequirement(FtzReq) {}
1609 SimplifyAction(SpecialCase Special, FtzRequirementTy FtzReq)
1610 : Special(Special), FtzRequirement(FtzReq) {}
1615 const SimplifyAction Action = [II]() -> SimplifyAction {
1688 return {Instruction::FPToSI};
1693 return {Instruction::FPToUI};
1698 return {Instruction::SIToFP};
1703 return {Instruction::UIToFP};
1707 return {Instruction::FAdd, FTZ_Any};
1709 return {Instruction::FAdd, FTZ_MustBeOff};
1711 return {Instruction::FAdd, FTZ_MustBeOn};
1713 return {Instruction::FMul, FTZ_Any};
1715 return {Instruction::FMul, FTZ_MustBeOff};
1717 return {Instruction::FMul, FTZ_MustBeOn};
1719 return {Instruction::FDiv, FTZ_Any};
1721 return {Instruction::FDiv, FTZ_MustBeOff};
1723 return {Instruction::FDiv, FTZ_MustBeOn};
1731 return {SPC_Reciprocal, FTZ_Any};
1733 return {SPC_Reciprocal, FTZ_MustBeOff};
1735 return {SPC_Reciprocal, FTZ_MustBeOn};
1766 if (Action.FtzRequirement != FTZ_Any) {
1771 if (FtzEnabled != (Action.FtzRequirement == FTZ_MustBeOn))
1786 if (Action.BinaryOp)
1796 if (!Action.Special)
1799 switch (*Action.Special) {
1800 case SPC_Reciprocal:
1806 llvm_unreachable(
"All SpecialCase enumerators should be handled in switch.");
1822 if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
1835 return replaceInstUsesWith(CI, V);
1838 return visitFree(CI);
1848 if (!II)
return visitCallSite(&CI);
1852 if (
auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
1853 bool Changed =
false;
1857 if (NumBytes->isNullValue())
1858 return eraseInstFromFunction(CI);
1860 if (
ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
1861 if (CI->getZExtValue() == 1) {
1869 if (
auto *M = dyn_cast<MemIntrinsic>(MI))
1870 if (M->isVolatile())
1876 if (
auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) {
1877 if (
GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
1878 if (GVSrc->isConstant()) {
1881 isa<AtomicMemMoveInst>(MMI)
1894 if (MTI->getSource() == MTI->getDest())
1895 return eraseInstFromFunction(CI);
1900 if (
auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
1903 }
else if (
auto *MSI = dyn_cast<AnyMemSetInst>(MI)) {
1908 if (Changed)
return II;
1914 auto SimplifyDemandedVectorEltsLow = [
this](
Value *
Op,
unsigned Width,
1915 unsigned DemandedWidth) {
1916 APInt UndefElts(Width, 0);
1918 return SimplifyDemandedVectorElts(Op, DemandedElts, UndefElts);
1926 return replaceInstUsesWith(CI,
N);
1937 Value *V = Builder.CreateLShr(X, CV);
1944 return replaceInstUsesWith(CI, SimplifiedMaskedOp);
1955 return replaceInstUsesWith(*II, SkippedBarrier);
1962 if (Power->isMinusOne())
1966 if (Power->equalsInt(2))
1989 uint64_t ShiftAmt = SA->
urem(BitWidth);
1990 assert(ShiftAmt != 0 &&
"SimplifyCall should have handled zero shift");
1993 ShiftAmt = BitWidth - ShiftAmt;
1998 return BinaryOperator::CreateShl(
2004 return BinaryOperator::CreateLShr(
2016 if (SimplifyDemandedBits(II, 2, Op2Demanded, Op2Known))
2034 Value *OperationResult =
nullptr;
2038 return CreateOverflowTuple(II, OperationResult, OverflowResult);
2062 return BinaryOperator::CreateNUWAdd(Arg0, Arg1);
2064 return replaceInstUsesWith(*II,
2070 return BinaryOperator::CreateNUWSub(Arg0, Arg1);
2072 return replaceInstUsesWith(*II,
2076 if (willNotOverflowSignedAdd(Arg0, Arg1, *II))
2077 return BinaryOperator::CreateNSWAdd(Arg0, Arg1);
2080 if (willNotOverflowSignedSub(Arg0, Arg1, *II))
2081 return BinaryOperator::CreateNSWSub(Arg0, Arg1);
2090 return replaceInstUsesWith(
2091 *II, Builder.CreateBinaryIntrinsic(
2098 if (
auto *
Other = dyn_cast<IntrinsicInst>(Arg0)) {
2100 const APInt *Val, *Val2;
2112 NewVal = Val->
sadd_ov(*Val2, Overflow);
2123 return replaceInstUsesWith(
2124 *II, Builder.CreateBinaryIntrinsic(
2163 Value *NewCall = Builder.CreateBinaryIntrinsic(NewIID, X, Y, II);
2171 if (
auto *M = dyn_cast<IntrinsicInst>(Arg0)) {
2194 Instruction *NewCall = Builder.CreateBinaryIntrinsic(
2197 return replaceInstUsesWith(*II, NewCall);
2212 return replaceInstUsesWith(*II, Add);
2241 auto *FAdd = BinaryOperator::CreateFAdd(Src0, II->
getArgOperand(2));
2303 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2311 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2322 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2323 return new StoreInst(II->getArgOperand(0), Ptr);
2330 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2331 return new StoreInst(II->getArgOperand(0), Ptr,
false, 1);
2338 II->getType()->getVectorNumElements());
2339 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2349 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2359 II->getArgOperand(0)->getType()->getVectorNumElements());
2360 Value *TOp = Builder.CreateFPTrunc(II->getArgOperand(0), VTy);
2362 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2372 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2373 return new StoreInst(II->getArgOperand(0), Ptr);
2382 if (
auto *
C = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
2383 uint64_t Shift =
C->getZExtValue();
2384 uint64_t Length = (Shift >> 8) & 0xff;
2388 if (Length == 0 || Shift >= BitWidth)
2391 if (
auto *InC = dyn_cast<ConstantInt>(II->getArgOperand(0))) {
2392 uint64_t Result = InC->getZExtValue() >> Shift;
2393 if (Length > BitWidth)
2395 Result &= maskTrailingOnes<uint64_t>(Length);
2406 if (
auto *
C = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
2407 uint64_t
Index =
C->getZExtValue() & 0xff;
2409 if (Index >= BitWidth)
2414 if (
auto *InC = dyn_cast<ConstantInt>(II->getArgOperand(0))) {
2415 uint64_t Result = InC->getZExtValue();
2416 Result &= maskTrailingOnes<uint64_t>(
Index);
2425 auto Arg = II->getArgOperand(0);
2426 auto ArgType = cast<VectorType>(
Arg->
getType());
2427 auto RetType = cast<VectorType>(II->getType());
2428 unsigned ArgWidth = ArgType->getNumElements();
2429 unsigned RetWidth = RetType->getNumElements();
2430 assert(RetWidth <= ArgWidth &&
"Unexpected input/return vector widths");
2431 assert(ArgType->isIntOrIntVectorTy() &&
2432 ArgType->getScalarSizeInBits() == 16 &&
2433 "CVTPH2PS input type should be 16-bit integer vector");
2434 assert(RetType->getScalarType()->isFloatTy() &&
2435 "CVTPH2PS output type should be 32-bit float vector");
2438 if (isa<ConstantAggregateZero>(
Arg))
2441 if (isa<ConstantDataVector>(
Arg)) {
2442 auto VectorHalfAsShorts =
Arg;
2443 if (RetWidth < ArgWidth) {
2445 for (
unsigned i = 0; i != RetWidth; ++i)
2447 VectorHalfAsShorts = Builder.CreateShuffleVector(
2451 auto VectorHalfType =
2454 Builder.CreateBitCast(VectorHalfAsShorts, VectorHalfType);
2455 auto VectorFloats = Builder.CreateFPExt(VectorHalfs, RetType);
2456 return replaceInstUsesWith(*II, VectorFloats);
2460 if (
Value *V = SimplifyDemandedVectorEltsLow(
Arg, ArgWidth, RetWidth)) {
2461 II->setArgOperand(0, V);
2495 if (
Value *V = SimplifyDemandedVectorEltsLow(Arg, VWidth, 1)) {
2496 II->setArgOperand(0, V);
2515 return replaceInstUsesWith(*II, V);
2526 return replaceInstUsesWith(*II, V);
2559 bool MadeChange =
false;
2560 Value *Arg0 = II->getArgOperand(0);
2561 Value *Arg1 = II->getArgOperand(1);
2563 if (
Value *V = SimplifyDemandedVectorEltsLow(Arg0, VWidth, 1)) {
2564 II->setArgOperand(0, V);
2567 if (
Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, 1)) {
2568 II->setArgOperand(1, V);
2582 Value *Arg0 = II->getArgOperand(0);
2583 Value *Arg1 = II->getArgOperand(1);
2597 cast<Instruction>(Arg0)->getFastMathFlags().noInfs())) {
2600 II->setArgOperand(0, A);
2601 II->setArgOperand(1, B);
2617 if (
auto *R = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
2618 if (R->getValue() == 4) {
2619 Value *Arg0 = II->getArgOperand(0);
2620 Value *Arg1 = II->getArgOperand(1);
2623 switch (II->getIntrinsicID()) {
2627 V = Builder.CreateFAdd(Arg0, Arg1);
2631 V = Builder.CreateFSub(Arg0, Arg1);
2635 V = Builder.CreateFMul(Arg0, Arg1);
2639 V = Builder.CreateFDiv(Arg0, Arg1);
2643 return replaceInstUsesWith(*II, V);
2658 if (
auto *R = dyn_cast<ConstantInt>(II->getArgOperand(4))) {
2659 if (R->getValue() == 4) {
2661 Value *Arg0 = II->getArgOperand(0);
2662 Value *Arg1 = II->getArgOperand(1);
2663 Value *LHS = Builder.CreateExtractElement(Arg0, (uint64_t)0);
2664 Value *RHS = Builder.CreateExtractElement(Arg1, (uint64_t)0);
2667 switch (II->getIntrinsicID()) {
2671 V = Builder.CreateFAdd(LHS, RHS);
2675 V = Builder.CreateFSub(LHS, RHS);
2679 V = Builder.CreateFMul(LHS, RHS);
2683 V = Builder.CreateFDiv(LHS, RHS);
2691 if (!
C || !
C->getValue()[0]) {
2695 Mask = Builder.CreateBitCast(Mask, MaskTy);
2696 Mask = Builder.CreateExtractElement(Mask, (uint64_t)0);
2698 Value *Passthru = Builder.CreateExtractElement(II->getArgOperand(2),
2700 V = Builder.CreateSelect(Mask, V, Passthru);
2704 V = Builder.CreateInsertElement(Arg0, V, (uint64_t)0);
2706 return replaceInstUsesWith(*II, V);
2724 unsigned VWidth = II->getType()->getVectorNumElements();
2725 APInt UndefElts(VWidth, 0);
2727 if (
Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, UndefElts)) {
2729 return replaceInstUsesWith(*II, V);
2736 unsigned VWidth = II->getType()->getVectorNumElements();
2737 APInt UndefElts(VWidth, 0);
2739 if (
Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, UndefElts)) {
2741 return replaceInstUsesWith(*II, V);
2744 return replaceInstUsesWith(*II, V);
2779 return replaceInstUsesWith(*II, V);
2810 return replaceInstUsesWith(*II, V);
2814 Value *Arg1 = II->getArgOperand(1);
2816 "Unexpected packed shift size");
2819 if (
Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, VWidth / 2)) {
2820 II->setArgOperand(1, V);
2854 return replaceInstUsesWith(*II, V);
2864 return replaceInstUsesWith(*II, V);
2874 return replaceInstUsesWith(*II, V);
2880 if (
auto *
C = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
2881 unsigned Imm =
C->getZExtValue();
2883 bool MadeChange =
false;
2884 Value *Arg0 = II->getArgOperand(0);
2885 Value *Arg1 = II->getArgOperand(1);
2888 APInt UndefElts1(VWidth, 0);
2890 APInt(2, (Imm & 0x01) ? 2 : 1));
2891 if (
Value *V = SimplifyDemandedVectorElts(Arg0, DemandedElts1,
2893 II->setArgOperand(0, V);
2897 APInt UndefElts2(VWidth, 0);
2899 APInt(2, (Imm & 0x10) ? 2 : 1));
2900 if (
Value *V = SimplifyDemandedVectorElts(Arg1, DemandedElts2,
2902 II->setArgOperand(1, V);
2909 return replaceInstUsesWith(*II,
2920 return replaceInstUsesWith(*II, V);
2924 Value *Op0 = II->getArgOperand(0);
2925 Value *Op1 = II->getArgOperand(1);
2930 VWidth1 == 16 &&
"Unexpected operand sizes");
2935 C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((
unsigned)0))
2938 C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((
unsigned)1))
2943 return replaceInstUsesWith(*II, V);
2947 bool MadeChange =
false;
2948 if (
Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
2949 II->setArgOperand(0, V);
2952 if (
Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 2)) {
2953 II->setArgOperand(1, V);
2964 Value *Op0 = II->getArgOperand(0);
2967 "Unexpected operand size");
2975 return replaceInstUsesWith(*II, V);
2979 if (
Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth, 1)) {
2980 II->setArgOperand(0, V);
2987 Value *Op0 = II->getArgOperand(0);
2988 Value *Op1 = II->getArgOperand(1);
2993 "Unexpected operand size");
2998 C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((
unsigned)1))
3003 const APInt &V11 = CI11->getValue();
3007 return replaceInstUsesWith(*II, V);
3012 if (
Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth, 1)) {
3013 II->setArgOperand(0, V);
3023 Value *Op0 = II->getArgOperand(0);
3024 Value *Op1 = II->getArgOperand(1);
3029 VWidth1 == 2 &&
"Unexpected operand sizes");
3036 if (CILength && CIIndex) {
3040 return replaceInstUsesWith(*II, V);
3045 bool MadeChange =
false;
3046 if (
Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
3047 II->setArgOperand(0, V);
3050 if (
Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 1)) {
3051 II->setArgOperand(1, V);
3066 Value *Op0 = II->getArgOperand(0);
3067 Value *Op1 = II->getArgOperand(1);
3070 return replaceInstUsesWith(CI, Op0);
3073 if (isa<ConstantAggregateZero>(Mask))
3074 return replaceInstUsesWith(CI, Op0);
3077 if (
auto *ConstantMask = dyn_cast<ConstantDataVector>(Mask)) {
3090 II->getType()->getPrimitiveSizeInBits() &&
3091 "Not expecting mask and operands with different sizes");
3094 unsigned NumOperandElts = II->getType()->getVectorNumElements();
3095 if (NumMaskElts == NumOperandElts)
3100 if (NumMaskElts < NumOperandElts) {
3101 Value *CastOp0 = Builder.CreateBitCast(Op0, Mask->
getType());
3102 Value *CastOp1 = Builder.CreateBitCast(Op1, Mask->
getType());
3103 Value *Sel = Builder.CreateSelect(BoolVec, CastOp1, CastOp0);
3115 return replaceInstUsesWith(*II, V);
3125 return replaceInstUsesWith(*II, V);
3143 return replaceInstUsesWith(*II, V);
3176 return replaceInstUsesWith(*II, V);
3184 return replaceInstUsesWith(*II, V);
3194 if (
Constant *
Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
3195 assert(
Mask->getType()->getVectorNumElements() == 16 &&
3196 "Bad type for intrinsic!");
3199 bool AllEltsOk =
true;
3200 for (
unsigned i = 0; i != 16; ++i) {
3202 if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
3210 Value *Op0 = Builder.CreateBitCast(II->getArgOperand(0),
3212 Value *Op1 = Builder.CreateBitCast(II->getArgOperand(1),
3217 Value *ExtractedElts[32];
3218 memset(ExtractedElts, 0,
sizeof(ExtractedElts));
3220 for (
unsigned i = 0; i != 16; ++i) {
3221 if (isa<UndefValue>(
Mask->getAggregateElement(i)))
3224 cast<ConstantInt>(
Mask->getAggregateElement(i))->getZExtValue();
3226 if (DL.isLittleEndian())
3229 if (!ExtractedElts[Idx]) {
3230 Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
3231 Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
3232 ExtractedElts[Idx] =
3233 Builder.CreateExtractElement(Idx < 16 ? Op0ToUse : Op1ToUse,
3234 Builder.getInt32(Idx&15));
3238 Result = Builder.CreateInsertElement(Result, ExtractedElts[Idx],
3239 Builder.getInt32(i));
3250 return replaceInstUsesWith(*II, V);
3269 unsigned AlignArg = II->getNumArgOperands() - 1;
3271 if (IntrAlign && IntrAlign->
getZExtValue() < MemAlign) {
3272 II->setArgOperand(AlignArg,
3283 return replaceInstUsesWith(*II, V);
3290 Value *Arg0 = II->getArgOperand(0);
3291 Value *Arg1 = II->getArgOperand(1);
3294 if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
3301 VectorType *NewVT = cast<VectorType>(II->getType());
3302 if (
Constant *CV0 = dyn_cast<Constant>(Arg0)) {
3303 if (
Constant *CV1 = dyn_cast<Constant>(Arg1)) {
3315 if (
Constant *CV1 = dyn_cast<Constant>(Arg1))
3317 dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
3328 Value *DataArg = II->getArgOperand(0);
3329 Value *KeyArg = II->getArgOperand(1);
3335 II->setArgOperand(0, Data);
3336 II->setArgOperand(1, Key);
3342 Value *Src = II->getArgOperand(0);
3345 if (isa<UndefValue>(Src))
3346 return replaceInstUsesWith(CI, Src);
3348 if (
const ConstantFP *
C = dyn_cast<ConstantFP>(Src)) {
3349 const APFloat &ArgVal =
C->getValueAPF();
3356 return replaceInstUsesWith(CI,
ConstantFP::get(II->getContext(), Val));
3362 Value *Src = II->getArgOperand(0);
3365 if (isa<UndefValue>(Src))
3366 return replaceInstUsesWith(CI, Src);
3371 Value *Src = II->getArgOperand(0);
3372 if (
const ConstantFP *
C = dyn_cast<ConstantFP>(Src)) {
3389 if (isa<UndefValue>(Src))
3411 Value *Src0 = II->getArgOperand(0);
3412 Value *Src1 = II->getArgOperand(1);
3415 if (isa<UndefValue>(Src0))
3418 if (isa<UndefValue>(Src1))
3426 if ((Mask & FullMask) == FullMask)
3429 if ((Mask & FullMask) == 0)
3434 Value *FCmp = Builder.CreateFCmpUNO(Src0, Src0);
3436 return replaceInstUsesWith(*II, FCmp);
3441 Value *FCmp = Builder.CreateFCmpOEQ(
3445 return replaceInstUsesWith(*II, FCmp);
3451 Mask & ~(S_NAN |
Q_NAN)));
3457 if (isa<UndefValue>(Src0))
3461 if ((Mask & FullMask) !=
Mask) {
3462 CallInst *NewCall = Builder.CreateCall(II->getCalledFunction(),
3467 return replaceInstUsesWith(*II, NewCall);
3490 Value *Src0 = II->getArgOperand(0);
3491 Value *Src1 = II->getArgOperand(1);
3492 if (
const ConstantFP *C0 = dyn_cast<ConstantFP>(Src0)) {
3493 if (
const ConstantFP *C1 = dyn_cast<ConstantFP>(Src1)) {
3495 = II->getType()->getScalarType()->getFltSemantics();
3497 APFloat Val0 = C0->getValueAPF();
3498 APFloat Val1 = C1->getValueAPF();
3505 return replaceInstUsesWith(*II, Folded);
3509 if (isa<UndefValue>(Src0) && isa<UndefValue>(Src1))
3518 Value *Src0 = II->getArgOperand(0);
3519 Value *Src1 = II->getArgOperand(1);
3521 if (isa<UndefValue>(Src0) && isa<UndefValue>(Src1))
3529 Value *Src = II->getArgOperand(0);
3530 if (isa<UndefValue>(Src))
3531 return replaceInstUsesWith(*II, Src);
3534 Type *Ty = II->getType();
3540 if ((Width & (IntSize - 1)) == 0)
3543 if (Width >= IntSize) {
3546 Width & (IntSize - 1)));
3555 if (Offset >= IntSize) {
3557 Offset & (IntSize - 1)));
3564 if (!CWidth || !COffset)
3574 if (Offset + Width < IntSize) {
3575 Value *Shl = Builder.CreateShl(Src, IntSize - Offset - Width);
3576 Value *RightShift = Signed ? Builder.CreateAShr(Shl, IntSize - Width)
3577 : Builder.CreateLShr(Shl, IntSize - Width);
3579 return replaceInstUsesWith(*II, RightShift);
3582 Value *RightShift = Signed ? Builder.CreateAShr(Src, Offset)
3583 : Builder.CreateLShr(Src, Offset);
3586 return replaceInstUsesWith(*II, RightShift);
3599 bool Changed =
false;
3600 for (
int I = 0;
I < (IsCompr ? 2 : 4); ++
I) {
3601 if ((!IsCompr && (EnBits & (1 <<
I)) == 0) ||
3602 (IsCompr && ((EnBits & (0x3 << (2 *
I))) == 0))) {
3603 Value *Src = II->getArgOperand(
I + 2);
3604 if (!isa<UndefValue>(Src)) {
3620 Value *Src0 = II->getArgOperand(0);
3621 Value *Src1 = II->getArgOperand(1);
3622 Value *Src2 = II->getArgOperand(2);
3628 if (
match(Src0,
m_NaN()) || isa<UndefValue>(Src0)) {
3629 NewCall = Builder.CreateMinNum(Src1, Src2);
3630 }
else if (
match(Src1,
m_NaN()) || isa<UndefValue>(Src1)) {
3631 NewCall = Builder.CreateMinNum(Src0, Src2);
3632 }
else if (
match(Src2,
m_NaN()) || isa<UndefValue>(Src2)) {
3633 NewCall = Builder.CreateMaxNum(Src0, Src1);
3639 return replaceInstUsesWith(*II, NewCall);
3646 if (isa<Constant>(Src0) && !isa<Constant>(Src1)) {
3651 if (isa<Constant>(Src1) && !isa<Constant>(Src2)) {
3656 if (isa<Constant>(Src0) && !isa<Constant>(Src1)) {
3662 II->setArgOperand(0, Src0);
3663 II->setArgOperand(1, Src1);
3664 II->setArgOperand(2, Src2);
3668 if (
const ConstantFP *C0 = dyn_cast<ConstantFP>(Src0)) {
3669 if (
const ConstantFP *C1 = dyn_cast<ConstantFP>(Src1)) {
3670 if (
const ConstantFP *C2 = dyn_cast<ConstantFP>(Src2)) {
3673 return replaceInstUsesWith(*II,
3696 Value *Src0 = II->getArgOperand(0);
3697 Value *Src1 = II->getArgOperand(1);
3699 if (
auto *CSrc0 = dyn_cast<Constant>(Src0)) {
3700 if (
auto *CSrc1 = dyn_cast<Constant>(Src1)) {
3703 return replaceInstUsesWith(
3717 CallInst *NewCall = Builder.CreateCall(NewF, Args);
3720 NewCall->takeName(II);
3721 return replaceInstUsesWith(*II, NewCall);
3727 II->setArgOperand(0, Src1);
3728 II->setArgOperand(1, Src0);
3730 static_cast<int>(SwapPred)));
3776 if (
auto *CmpType = dyn_cast<IntegerType>(Ty)) {
3778 unsigned Width = CmpType->getBitWidth();
3779 unsigned NewWidth = Width;
3787 else if (Width <= 32)
3789 else if (Width <= 64)
3791 else if (Width > 64)
3794 if (Width != NewWidth) {
3797 SrcLHS = Builder.CreateSExt(SrcLHS, CmpTy);
3798 SrcRHS = Builder.CreateSExt(SrcRHS, CmpTy);
3800 SrcLHS = Builder.CreateZExt(SrcLHS, CmpTy);
3801 SrcRHS = Builder.CreateZExt(SrcRHS, CmpTy);
3811 CallInst *NewCall = Builder.CreateCall(NewF, Args);
3813 return replaceInstUsesWith(*II, NewCall);
3820 if (!isa<Constant>(II->getArgOperand(0)))
3823 return replaceInstUsesWith(*II, II->getArgOperand(0));
3831 return eraseInstFromFunction(CI);
3834 Value *Old = II->getArgOperand(0);
3839 if (!BC || !RM || !BM ||
3840 BC->isZeroValue() ||
3841 RM->getZExtValue() != 0xF ||
3842 BM->getZExtValue() != 0xF ||
3843 isa<UndefValue>(Old))
3853 if (
IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
3856 if (SS->getNextNonDebugInstruction() == II) {
3857 return eraseInstFromFunction(CI);
3866 bool CannotRemove =
false;
3867 for (++BI; &*BI != TI; ++BI) {
3868 if (isa<AllocaInst>(BI)) {
3869 CannotRemove =
true;
3872 if (
CallInst *BCI = dyn_cast<CallInst>(BI)) {
3876 return eraseInstFromFunction(CI);
3880 if (II->mayHaveSideEffects()) {
3881 CannotRemove =
true;
3887 CannotRemove =
true;
3896 if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
3897 return eraseInstFromFunction(CI);
3912 Value *IIOperand = II->getArgOperand(0);
3917 if (
match(Next, m_Intrinsic<Intrinsic::assume>(
m_Specific(IIOperand))))
3918 return eraseInstFromFunction(CI);
3923 Value *AssumeIntrinsic = II->getCalledValue(), *A, *
B;
3925 Builder.CreateCall(AssumeIntrinsic, A, II->getName());
3926 Builder.CreateCall(AssumeIntrinsic, B, II->
getName());
3927 return eraseInstFromFunction(*II);
3931 Builder.CreateCall(AssumeIntrinsic, Builder.CreateNot(A), II->
getName());
3932 Builder.CreateCall(AssumeIntrinsic, Builder.CreateNot(B), II->
getName());
3933 return eraseInstFromFunction(*II);
3946 return eraseInstFromFunction(*II);
3957 return eraseInstFromFunction(*II);
3961 AC.updateAffectedValues(II);
3968 Value *DerivedPtr = cast<GCRelocateInst>(II)->getDerivedPtr();
3972 if (II->use_empty())
3973 return eraseInstFromFunction(*II);
3979 if (isa<UndefValue>(DerivedPtr))
3983 if (
auto *PT = dyn_cast<PointerType>(II->getType())) {
3987 if (isa<ConstantPointerNull>(DerivedPtr))
4017 Value *NextCond =
nullptr;
4019 m_Intrinsic<Intrinsic::experimental_guard>(
m_Value(NextCond)))) {
4020 Value *CurrCond = II->getArgOperand(0);
4023 if (CurrCond == NextCond)
4024 return eraseInstFromFunction(*NextInst);
4028 while (MoveI != NextInst) {
4033 II->setArgOperand(0, Builder.CreateAnd(CurrCond, NextCond));
4034 return eraseInstFromFunction(*NextInst);
4039 return visitCallSite(II);
4046 if (
auto *NFI = dyn_cast<FenceInst>(Next))
4048 return eraseInstFromFunction(FI);
4054 return visitCallSite(&II);
4082 Type* DstTy = cast<PointerType>(CI->
getType())->getElementType();
4094 replaceInstUsesWith(*From, With);
4097 eraseInstFromFunction(*
I);
4103 return CI->
use_empty() ? CI : replaceInstUsesWith(*CI, With);
4113 if (Underlying != TrampMem &&
4116 if (!isa<AllocaInst>(Underlying))
4128 InitTrampoline = II;
4138 if (!InitTrampoline)
4142 if (InitTrampoline->
getOperand(0) != TrampMem)
4145 return InitTrampoline;
4158 II->getOperand(0) == TrampMem)
4190 bool Changed =
false;
4199 if (V->getType()->isPointerTy() &&
4208 if (!ArgNos.
empty()) {
4220 if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
4223 if (
Function *CalleeF = dyn_cast<Function>(Callee)) {
4226 !CalleeF->isIntrinsic()) {
4239 !CalleeF->isDeclaration()) {
4248 if (isa<CallInst>(OldCall))
4249 return eraseInstFromFunction(*OldCall);
4253 cast<InvokeInst>(OldCall)->setCalledFunction(
4259 if ((isa<ConstantPointerNull>(Callee) &&
4261 isa<UndefValue>(Callee)) {
4284 return transformCallThroughTrampoline(CS, II);
4288 if (FTy->isVarArg()) {
4315 if (I)
return eraseInstFromFunction(*I);
4323 bool InstCombiner::transformConstExprCastCall(
CallSite CS) {
4331 if (
Callee->hasFnAttribute(
"thunk"))
4352 if (OldRetTy != NewRetTy) {
4358 if (
Callee->isDeclaration())
4378 if (
InvokeInst *II = dyn_cast<InvokeInst>(Caller))
4380 if (
PHINode *PN = dyn_cast<PHINode>(U))
4381 if (PN->getParent() == II->getNormalDest() ||
4382 PN->getParent() == II->getUnwindDest())
4386 unsigned NumActualArgs = CS.
arg_size();
4387 unsigned NumCommonArgs = std::min(FT->
getNumParams(), NumActualArgs);
4402 for (
unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
4404 Type *ActTy = (*AI)->getType();
4424 if (DL.getTypeAllocSize(CurElTy) !=
4430 if (
Callee->isDeclaration()) {
4468 ArgAttrs.
reserve(NumActualArgs);
4478 for (
unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
4481 Value *NewArg = *AI;
4482 if ((*AI)->getType() != ParamTy)
4483 NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);
4492 for (
unsigned i = NumCommonArgs; i != FT->
getNumParams(); ++i) {
4502 for (
unsigned i = FT->
getNumParams(); i != NumActualArgs; ++i, ++AI) {
4504 Value *NewArg = *AI;
4505 if (PTy != (*AI)->getType()) {
4509 NewArg = Builder.CreateCast(opcode, *AI, PTy);
4525 "missing argument attributes");
4534 if (
InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
4535 NewCS = Builder.CreateInvoke(
Callee, II->getNormalDest(),
4536 II->getUnwindDest(),
Args, OpBundles);
4538 NewCS = Builder.CreateCall(
Callee, Args, OpBundles);
4540 ->setTailCallKind(cast<CallInst>(Caller)->getTailCallKind());
4542 NewCS->takeName(Caller);
4550 NewCS->setProfWeight(W);
4562 if (
InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
4564 InsertNewInstBefore(NC, *I);
4567 InsertNewInstBefore(NC, *Caller);
4569 Worklist.AddUsersToWorkList(*Caller);
4576 replaceInstUsesWith(*Caller, NV);
4578 if (OldRetTy == NV->
getType())
4586 eraseInstFromFunction(*Caller);
4593 InstCombiner::transformCallThroughTrampoline(
CallSite CS,
4606 "transformCallThroughTrampoline called with incorrect CallSite.");
4612 if (!NestAttrs.isEmpty()) {
4613 unsigned NestArgNo = 0;
4614 Type *NestTy =
nullptr;
4619 E = NestFTy->param_end();
4620 I !=
E; ++NestArgNo, ++
I) {
4621 AttributeSet AS = NestAttrs.getParamAttributes(NestArgNo);
4632 std::vector<Value*> NewArgs;
4633 std::vector<AttributeSet> NewArgAttrs;
4634 NewArgs.reserve(CS.
arg_size() + 1);
4635 NewArgAttrs.reserve(CS.
arg_size());
4644 if (ArgNo == NestArgNo) {
4647 if (NestVal->
getType() != NestTy)
4648 NestVal = Builder.CreateBitCast(NestVal, NestTy,
"nest");
4649 NewArgs.push_back(NestVal);
4650 NewArgAttrs.push_back(NestAttr);
4657 NewArgs.push_back(*I);
4658 NewArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));
4669 std::vector<Type*> NewTypes;
4670 NewTypes.reserve(FTy->getNumParams()+1);
4677 E = FTy->param_end();
4680 if (ArgNo == NestArgNo)
4682 NewTypes.push_back(NestTy);
4688 NewTypes.push_back(*I);
4705 Attrs.getRetAttributes(), NewArgAttrs);
4711 if (
InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
4713 II->getNormalDest(), II->getUnwindDest(),
4714 NewArgs, OpBundles);
4715 cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
4716 cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
4719 cast<CallInst>(NewCaller)->setTailCallKind(
4720 cast<CallInst>(Caller)->getTailCallKind());
4721 cast<CallInst>(NewCaller)->setCallingConv(
4722 cast<CallInst>(Caller)->getCallingConv());
4723 cast<CallInst>(NewCaller)->setAttributes(NewPAL);
4735 NestF->
getType() == PTy ? NestF :
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
bool isFPPredicate() const
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double, and whose elements are just simple data values (i.e.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, OptimizationRemarkEmitter *ORE=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
User::op_iterator arg_iterator
The type of iterator to use when looping over actual arguments at this call site. ...
LibCallSimplifier - This class implements a collection of optimizations that replace well formed call...
IntegerType * getType() const
getType - Specialize the getType() method to always return an IntegerType, which reduces the amount o...
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
A parsed version of the target data layout string in and methods for querying it. ...
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth, const Instruction *CxtI) const
void copyFastMathFlags(FastMathFlags FMF)
Convenience function for transferring all fast-math flag values to this instruction, which must be an operator which supports these flags.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
static void ValueIsDeleted(Value *V)
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
class_match< UndefValue > m_Undef()
Match an arbitrary undef constant.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static IntegerType * getInt1Ty(LLVMContext &C)
class_match< CmpInst > m_Cmp()
Matches any compare instruction and ignore it.
uint64_t getZExtValue() const
Get zero extended value.
unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to ensure that the alignment of V is at least PrefAlign bytes.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
static Value * simplifyX86immShift(const IntrinsicInst &II, InstCombiner::BuilderTy &Builder)
static APInt getAllOnesValue(unsigned numBits)
Get the all-ones value.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
DiagnosticInfoOptimizationBase::Argument NV
unsigned arg_size() const
CallingConv::ID getCallingConv() const
Get the calling convention of the call.
Atomic ordering constants.
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateZExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a ZExt or Trunc from the integer value V to DestTy.
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
This class represents lattice values for constants.
Type * getParamType(unsigned i) const
Parameter type accessors.
Constant * getElementAsConstant(unsigned i) const
Return a Constant for a specified index's element.
unsigned countMinPopulation() const
Returns the number of bits known to be one.
bool isInAllocaArgument(unsigned ArgNo) const
Determine whether this argument is passed in an alloca.
A Module instance is used to store all the information related to an LLVM module. ...
Instruction * visitCallInst(CallInst &CI)
CallInst simplification.
bool isSized(SmallPtrSetImpl< Type *> *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align, const char *Name)
Provided to resolve 'CreateAlignedLoad(Ptr, Align, "...")' correctly, instead of converting the strin...
An instruction for ordering other memory operations.
static MDString * get(LLVMContext &Context, StringRef Str)
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Instruction * visitVACopyInst(VACopyInst &I)
static Instruction * simplifyInvariantGroupIntrinsic(IntrinsicInst &II, InstCombiner &IC)
This function transforms launder.invariant.group and strip.invariant.group like: launder(launder(x)) ...
void push_back(const T &Elt)
static ConstantAggregateZero * get(Type *Ty)
APInt uadd_sat(const APInt &RHS) const
static bool simplifyX86MaskedStore(IntrinsicInst &II, InstCombiner &IC)
This class represents a function call, abstracting a target machine's calling convention.
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Get a value with low bits set.
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this load instruction.
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space...
iterator_range< IterTy > args() const
m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)
bool hasValueHandle() const
Return true if there is a value handle associated with this value.
bool mayWriteToMemory() const
Return true if this instruction may modify memory.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", Instruction *InsertBefore=nullptr, Instruction *MDFrom=nullptr)
static Instruction * foldCttzCtlz(IntrinsicInst &II, InstCombiner &IC)
LLVMContext & getContext() const
All values hold a context through their type.
static CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPTr cast instruction.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly...
APInt trunc(unsigned width) const
Truncate to new width.
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr)
Return true if it is valid to use the assumptions provided by an assume intrinsic, I, at the point in the control-flow identified by the context instruction, CxtI.
STATISTIC(NumFunctions, "Total number of functions")
void setArgOperand(unsigned i, Value *v)
const fltSemantics & getSemantics() const
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
BinaryOp_match< LHS, RHS, Instruction::FSub > m_FSub(const LHS &L, const RHS &R)
An instruction for reading from memory.
static IntegerType * getInt64Ty(LLVMContext &C)
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
static Constant * getCompare(unsigned short pred, Constant *C1, Constant *C2, bool OnlyIfReduced=false)
Return an ICmp or FCmp comparison operator constant expression.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
bool isVectorTy() const
True if this is an instance of VectorType.
static OverflowCheckFlavor IntrinsicIDToOverflowCheckFlavor(unsigned ID)
Returns the OverflowCheckFlavor corresponding to a overflow_with_op intrinsic.
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2018 maximum semantics.
void reserve(size_type N)
void addAttribute(unsigned i, Attribute::AttrKind Kind)
adds the attribute to the list of attributes.
Value * getLength() const
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
static Instruction * simplifyMaskedStore(IntrinsicInst &II, InstCombiner &IC)
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
Instruction * visitVAStartInst(VAStartInst &I)
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
unsigned getBitWidth() const
Return the number of bits in the APInt.
Value * CreateLaunderInvariantGroup(Value *Ptr)
Create a launder.invariant.group intrinsic call.
bool isGCRelocate(ImmutableCallSite CS)
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
const CallInst * isFreeCall(const Value *I, const TargetLibraryInfo *TLI)
isFreeCall - Returns non-null if the value is a call to the builtin free()
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
static bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op...
Value * getDest() const
This is just like getRawDest, but it strips off any cast instructions (including addrspacecast) that ...
iterator begin()
Instruction iterator methods.
bool isIdenticalTo(const Instruction *I) const
Return true if the specified instruction is exactly identical to the current one. ...
Value * getArgOperand(unsigned i) const
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
opStatus divide(const APFloat &RHS, roundingMode RM)
static Instruction * SimplifyNVVMIntrinsic(IntrinsicInst *II, InstCombiner &IC)
bool match(Val *V, const Pattern &P)
Instruction * visitInvokeInst(InvokeInst &II)
static Constant * getIntegerCast(Constant *C, Type *Ty, bool isSigned)
Create a ZExt, Bitcast or Trunc for integer -> integer casts.
APInt getLoBits(unsigned numBits) const
Compute an APInt containing numBits lowbits from this APInt.
static APFloat fmed3AMDGCN(const APFloat &Src0, const APFloat &Src1, const APFloat &Src2)
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
Type * getPointerElementType() const
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
OverflowCheckFlavor
Specific patterns of overflow check idioms that we match.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static Value * simplifyX86movmsk(const IntrinsicInst &II, InstCombiner::BuilderTy &Builder)
This is the base class for all instructions that perform data casts.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
APInt shl(unsigned shiftAmt) const
Left-shift function.
static Value * simplifyNeonTbl1(const IntrinsicInst &II, InstCombiner::BuilderTy &Builder)
Convert a table lookup to shufflevector if the mask is constant.
OverflowResult computeOverflowForUnsignedAdd(const Value *LHS, const Value *RHS, const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT, bool UseInstrInfo=true)
Instruction * eraseInstFromFunction(Instruction &I)
Combiner aware instruction erasure.
CastClass_match< OpTy, Instruction::Trunc > m_Trunc(const OpTy &Op)
Matches Trunc.
bool isIntegerTy() const
True if this is an instance of IntegerType.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
The core instruction combiner logic.
static bool isSafeToEliminateVarargsCast(const CallSite CS, const DataLayout &DL, const CastInst *const CI, const int ix)
If this cast does not affect the value passed through the varargs area, we can eliminate the use of t...
void setCalledFunction(Value *Fn)
Sets the function called, including updating the function type.
This file contains the simple types necessary to represent the attributes associated with functions a...
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2018 minimum semantics.
InstrTy * getInstruction() const
static Constant * getSExt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
void setName(const Twine &Name)
Change the name of the value.
uint64_t getNumElements() const
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
This file implements a class to represent arbitrary precision integral constant values and operations...
All zero aggregate value.
static Value * simplifyX86vpermv(const IntrinsicInst &II, InstCombiner::BuilderTy &Builder)
Attempt to convert vpermd/vpermps to shufflevector if the mask is constant.
ValTy * getCalledValue() const
Return the pointer to function that is being called.
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E, unsigned NumOperands)
DominatorTree & getDominatorTree() const
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
bool doesNotThrow() const
Determine if the call cannot unwind.
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Class to represent function types.
static Value * peekThroughBitcast(Value *V, bool OneUseOnly=false)
Return the source operand of a potentially bitcasted value while optionally checking if it has one us...
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
FastMathFlags getFastMathFlags() const
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
Type * getType() const
All values are typed, get the type of this value.
Value * CreateSExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a SExt or Trunc from the integer value V to DestTy.
cstfp_pred_ty< is_nan > m_NaN()
Match an arbitrary NaN constant.
apfloat_match m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
This represents the llvm.va_start intrinsic.
CastClass_match< OpTy, Instruction::FPExt > m_FPExt(const OpTy &Op)
Matches FPExt.
opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
CastClass_match< OpTy, Instruction::ZExt > m_ZExt(const OpTy &Op)
Matches ZExt.
AttributeSet getParamAttributes(unsigned ArgNo) const
The attributes for the argument or parameter at the given index are returned.
This class represents a no-op cast from one type to another.
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Return true if the call or the callee has the given attribute.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains.
const APInt & getValue() const
Return the constant as an APInt value reference.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
iterator_range< User::op_iterator > arg_operands()
AttrBuilder & remove(const AttrBuilder &B)
Remove the attributes from the builder.
static Value * simplifyX86pack(IntrinsicInst &II, bool IsSigned)
AttributeList getAttributes() const
Return the attribute list for this Function.
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
An instruction for storing to memory.
bool extractProfTotalWeight(uint64_t &TotalVal) const
Retrieve total raw weight values of a branch.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
CallInst * CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with 1 operand which is mangled on its type.
static void ValueIsRAUWd(Value *Old, Value *New)
static Value * simplifyX86vpcom(const IntrinsicInst &II, InstCombiner::BuilderTy &Builder, bool IsSigned)
Decode XOP integer vector comparison intrinsics.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
void takeName(Value *V)
Transfer the name from V to this value.
amdgpu Simplify well known AMD library false Value * Callee
Function * getDeclaration(Module *M, ID id, ArrayRef< Type *> Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
This class represents a truncation of integer types.
Type * getElementType() const
Return the element type of the array/vector.
Value * getOperand(unsigned i) const
Class to represent pointers.
bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
const DataLayout & getDataLayout() const
static Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
bool isVoidTy() const
Return true if this is 'void'.
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value...
OneUse_match< T > m_OneUse(const T &SubPattern)
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata *> MDs)
bool isNegative() const
Determine sign of this APInt.
void setAttributes(AttributeList PAL)
Set the parameter attributes of the call.
Instruction * visitFenceInst(FenceInst &FI)
initializer< Ty > init(const Ty &Val)
static Instruction * simplifyMaskedScatter(IntrinsicInst &II, InstCombiner &IC)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static AttributeSet get(LLVMContext &C, const AttrBuilder &B)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt...
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
ConstantInt * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
LLVM_NODISCARD AttributeList addParamAttribute(LLVMContext &C, unsigned ArgNo, Attribute::AttrKind Kind) const
Add an argument attribute to the list.
The instances of the Type class are immutable: once they are created, they are never changed...
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
This is an important class for using LLVM in a threaded context.
ConstantInt * getTrue()
Get the constant value for i1 true.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This is an important base class in LLVM.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
bool isPointerTy() const
True if this is an instance of PointerType.
static ManagedStatic< OptionRegistry > OR
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
ConstantFP - Floating Point Values [float, double].
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
const Instruction * getNextNonDebugInstruction() const
Return a pointer to the next non-debug instruction in the same basic block as 'this', or nullptr if no such instruction exists.
This file declares a class to represent arbitrary precision floating point values and provide a varie...
bool isFast() const
Determine whether all fast-math-flags are set.
std::underlying_type< E >::type Underlying(E Val)
Check that Val is in range for E, and return Val cast to E's underlying type.
static IntrinsicInst * findInitTrampolineFromBB(IntrinsicInst *AdjustTramp, Value *TrampMem)
bool isHalfTy() const
Return true if this is 'half', a 16-bit IEEE fp type.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
match_combine_or< CastClass_match< OpTy, Instruction::ZExt >, CastClass_match< OpTy, Instruction::SExt > > m_ZExtOrSExt(const OpTy &Op)
bool isAllOnes() const
Returns true if value is all one bits.
void setCallingConv(CallingConv::ID CC)
Set the calling convention of the call.
bool isGCResult(ImmutableCallSite CS)
This class represents any memset intrinsic.
static FunctionType * get(Type *Result, ArrayRef< Type *> Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
self_iterator getIterator()
Class to represent integer types.
IntegerType * getIntNTy(unsigned N)
Fetch the type representing an N-bit integer.
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
const Function * getFunction() const
Return the function this instruction belongs to.
void setAlignment(unsigned Align)
static Constant * getAllOnesValue(Type *Ty)
static Value * simplifyX86varShift(const IntrinsicInst &II, InstCombiner::BuilderTy &Builder)
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs, and aliases.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE maxNum semantics.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
static Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
static Value * simplifyX86extrq(IntrinsicInst &II, Value *Op0, ConstantInt *CILength, ConstantInt *CIIndex, InstCombiner::BuilderTy &Builder)
Attempt to simplify SSE4A EXTRQ/EXTRQI instructions using constant folding or conversion to a shuffle...
const APFloat & getValueAPF() const
CastClass_match< OpTy, Instruction::SExt > m_SExt(const OpTy &Op)
Matches SExt.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
bool doesNotThrow() const
Determine if the function cannot unwind.
static BinaryOperator * CreateFNeg(Value *Op, const Twine &Name="", Instruction *InsertBefore=nullptr)
static Type * getHalfTy(LLVMContext &C)
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
static CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", Instruction *InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
Iterator for intrusive lists based on ilist_node.
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
bool hasParamAttribute(unsigned ArgNo, Attribute::AttrKind Kind) const
Equivalent to hasAttribute(ArgNo + FirstArgIndex, Kind).
static PointerType * getInt1PtrTy(LLVMContext &C, unsigned AS=0)
static cl::opt< unsigned > GuardWideningWindow("instcombine-guard-widening-window", cl::init(3), cl::desc("How wide an instruction window to bypass looking for " "another guard"))
uint64_t getLimitedValue(uint64_t Limit=~0ULL) const
getLimitedValue - If the value is smaller than the specified limit, return it, otherwise return the l...
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the generic address space (address sp...
This is the shared class of boolean and integer constants.
BlockVerifier::State From
static Value * simplifyX86vpermilvar(const IntrinsicInst &II, InstCombiner::BuilderTy &Builder)
Attempt to convert vpermilvar* to shufflevector if the mask is constant.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type...
static IntrinsicInst * findInitTrampolineFromAlloca(Value *TrampMem)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Value * CreateInsertElement(Value *Vec, Value *NewElt, Value *Idx, const Twine &Name="")
static APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
static Instruction * canonicalizeConstantArg0ToArg1(CallInst &Call)
Type::subtype_iterator param_iterator
bool overlaps(const AttrBuilder &B) const
Return true if the builder has any attribute that's in the specified builder.
static Value * simplifyNeonVld1(const IntrinsicInst &II, unsigned MemAlign, InstCombiner::BuilderTy &Builder)
Convert a vector load intrinsic into a simple llvm load instruction.
static Instruction * simplifyMaskedGather(IntrinsicInst &II, InstCombiner &IC)
Type * getReturnType() const
CallInst * CreateMaskedStore(Value *Val, Value *Ptr, unsigned Align, Value *Mask)
Create a call to Masked Store intrinsic.
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
static Constant * get(Type *Ty, double V)
This returns a ConstantFP, or a vector containing a splat of a ConstantFP, for the specified value in...
CallInst * CreateMaskedLoad(Value *Ptr, unsigned Align, Value *Mask, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Load intrinsic.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
const Value * stripPointerCastsAndInvariantGroups() const
Strip off pointer casts, all-zero GEPs, aliases and invariant group info.
Value * SimplifyCall(ImmutableCallSite CS, const SimplifyQuery &Q)
Given a callsite, fold the result or return null.
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
static ConstantInt * getTrue(LLVMContext &Context)
void setOperand(unsigned i, Value *Val)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
unsigned getVectorNumElements() const
Class to represent vector types.
void setVolatile(bool V)
Specify whether this is a volatile store or not.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
Class for arbitrary precision integers.
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to be non-zero when defined.
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), Instruction *InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
iterator_range< user_iterator > users()
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
static Value * simplifyX86pshufb(const IntrinsicInst &II, InstCombiner::BuilderTy &Builder)
Attempt to convert pshufb* to shufflevector if the mask is constant.
static cl::opt< bool > FtzEnabled("nvptx-f32ftz", cl::ZeroOrMore, cl::Hidden, cl::desc("NVPTX Specific: Flush f32 subnormals to sign-preserving zero."), cl::init(false))
amdgpu Simplify well known AMD library false Value Value * Arg
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
static cl::opt< ITMode > IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT), cl::ZeroOrMore, cl::values(clEnumValN(DefaultIT, "arm-default-it", "Generate IT block based on arch"), clEnumValN(RestrictedIT, "arm-restrict-it", "Disallow deprecated IT based on ARMv8"), clEnumValN(NoRestrictedIT, "arm-no-restrict-it", "Allow IT blocks based on ARMv7")))
uint64_t getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
static Constant * getNeg(Constant *C, bool HasNUW=false, bool HasNSW=false)
specific_fpval m_FPOne()
Match a float 1.0 or vector with all elements equal to 1.0.
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this store instruction.
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
opStatus
IEEE-754R 7: Default exception handling.
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
static Value * simplifyMaskedLoad(const IntrinsicInst &II, InstCombiner::BuilderTy &Builder)
static Instruction * simplifyX86MaskedLoad(IntrinsicInst &II, InstCombiner &IC)
unsigned getNumArgOperands() const
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
static bool maskIsAllOneOrUndef(Value *Mask)
static IntegerType * getInt32Ty(LLVMContext &C)
unsigned getIntegerBitWidth() const
LLVM_NODISCARD bool empty() const
StringRef getValueAsString() const
Return the attribute's value as a string.
unsigned greater or equal
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value *> Args, const Twine &NameStr, Instruction *InsertBefore=nullptr)
StringRef getName() const
Return a constant reference to the value's name.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation.
static Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
bool doesNotThrow() const
Determine if the call cannot unwind.
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast=false)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc...
Value * CreateStripInvariantGroup(Value *Ptr)
Create a strip.invariant.group intrinsic call.
Value * optimizeCall(CallInst *CI)
optimizeCall - Take the given call instruction and return a more optimal value to replace the instruc...
static bool removeTriviallyEmptyRange(IntrinsicInst &I, unsigned StartID, unsigned EndID, InstCombiner &IC)
unsigned getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Type * getValueType() const
static IntrinsicInst * findInitTrampoline(Value *Callee)
bool isByValOrInAllocaArgument(unsigned ArgNo) const
Determine whether this argument is passed by value or in an alloca.
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value *> Args=None, const Twine &Name="", MDNode *FPMathTag=nullptr)
static Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val=0)
Return a uniquified Attribute object.
AssumptionCache & getAssumptionCache() const
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
bool isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI, unsigned Depth=0)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
static Value * simplifyX86insertq(IntrinsicInst &II, Value *Op0, Value *Op1, APInt APLength, APInt APIndex, InstCombiner::BuilderTy &Builder)
Attempt to simplify SSE4A INSERTQ/INSERTQI instructions using constant folding or conversion to a shu...
bool isStatepoint(ImmutableCallSite CS)
static Constant * getNegativeIsTrueBoolVec(ConstantDataVector *V)
Return a constant boolean vector that has true elements in all positions where the input constant dat...
This represents the llvm.va_copy intrinsic.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
static Value * simplifyX86round(IntrinsicInst &II, InstCombiner::BuilderTy &Builder)
bool isSafeToSpeculativelyExecute(const Value *V, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr)
Return true if the instruction does not have any effects besides calculating the result and does not ...
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
static Instruction * foldCtpop(IntrinsicInst &II, InstCombiner &IC)
APInt sadd_ov(const APInt &RHS, bool &Overflow) const
unsigned getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVM Value Representation.
void setAlignment(unsigned Align)
This file provides internal interfaces used to implement the InstCombine.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
static VectorType * get(Type *ElementType, unsigned NumElements)
This static method is the primary way to construct an VectorType.
AttrBuilder typeIncompatible(Type *Ty)
Which attributes cannot be applied to a type.
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
AttributeSet getFnAttributes() const
The function attributes are returned.
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
bool isNotMinSignedValue() const
Return true if the value is not the smallest signed value.
bool hasOneUse() const
Return true if there is exactly one user of this value.
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
AttributeList getAttributes() const
Get the parameter attributes of the call.
unsigned getNumElements() const
Return the number of elements in the array or vector.
bool isConvergent() const
Determine if the call is convergent.
static APInt getNullValue(unsigned numBits)
Get the '0' value.
static Constant * getMul(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * get(LLVMContext &Context, ArrayRef< uint8_t > Elts)
get() constructors - Return a constant with vector type with an element count and element type matchi...
static Value * simplifyX86insertps(const IntrinsicInst &II, InstCombiner::BuilderTy &Builder)
This class represents an extension of floating point types.
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
void setVolatile(bool V)
Specify whether this is a volatile load or not.
bool isEmpty() const
Return true if there are no attributes.
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT)
static IntegerType * getInt8Ty(LLVMContext &C)
void setCalledFunction(Value *V)
Set the callee to the specified value.
Value * getRawDest() const
static Type * getPromotedType(Type *Ty)
Return the specified type promoted as it would be to pass though a va_arg area.
static Constant * get(ArrayRef< Constant *> V)
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
Type * getElementType() const
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE minNum semantics.
PointerType * getType() const
Global values are always pointers.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute >> Attrs)
Create an AttributeList with the specified parameters in it.
BinaryOp_match< ValTy, cst_pred_ty< is_all_ones >, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
bool isLosslessCast() const
A lossless cast is one that does not alter the basic value.
bool isNullValue() const
Determine if all bits are clear.
bool isStructTy() const
True if this is an instance of StructType.
cmpResult compare(const APFloat &RHS) const
A wrapper class for inspecting calls to intrinsic functions.
const BasicBlock * getParent() const
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate > m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)