#define DEBUG_TYPE "instcombine"

struct AMDGPUImageDMaskIntrinsic {
  // ...
};

#define GET_AMDGPUImageDMaskIntrinsicTable_IMPL
#include "InstCombineTables.inc"

/// Check to see if the specified operand of the specified instruction is a
/// constant integer. If so, check to see if there are any bits set in the
/// constant that are not demanded; if so, shrink the constant and return true.
static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
                                   const APInt &Demanded) {
  assert(I && "No instruction?");
  assert(OpNo < I->getNumOperands() && "Operand index too large");
  // ...
}
/// Try to simplify the operands of Inst based on the bits that are actually
/// demanded of its result.
bool InstCombiner::SimplifyDemandedInstructionBits(Instruction &Inst) {
  unsigned BitWidth = Inst.getType()->getScalarSizeInBits();
  KnownBits Known(BitWidth);
  APInt DemandedMask(APInt::getAllOnesValue(BitWidth));

  Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask, Known,
                                     0, &Inst);
  if (!V) return false;
  if (V == &Inst) return true;
  replaceInstUsesWith(Inst, V);
  return true;
}
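// Illustrative call site (a sketch, not code from this file): InstCombine
// visitors typically run the demanded-bits driver over the whole instruction
// and stop early if the operands were rewritten in place, e.g.
//
//   if (SimplifyDemandedInstructionBits(BO))  // BO: some BinaryOperator& (hypothetical)
//     return &BO;                             // operands were updated in place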
/// This form of SimplifyDemandedBits simplifies the specified instruction
/// operand if possible, updating it in place. It returns true if it made any
/// change and false otherwise.
bool InstCombiner::SimplifyDemandedBits(Instruction *I, unsigned OpNo,
                                        const APInt &DemandedMask,
                                        KnownBits &Known, unsigned Depth) {
  Use &U = I->getOperandUse(OpNo);
  Value *NewVal = SimplifyDemandedUseBits(U.get(), DemandedMask, Known,
                                          Depth, I);
  if (!NewVal) return false;
  U = NewVal;
  return true;
}
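// Note on the shared return-value contract (summarized from the wrappers
// above and the implementation below, not new code): SimplifyDemandedUseBits
// returns null when no simplification was found, returns I itself when an
// operand was changed in place, and returns any other Value as a replacement
// for V that produces the same demanded bits.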
/// Attempt to return a simpler value for V, given that only the bits in
/// DemandedMask are observed by V's users.
Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
                                             KnownBits &Known, unsigned Depth,
                                             Instruction *CxtI) {
  assert(V != nullptr && "Null pointer of Value???");
  assert(Depth <= 6 && "Limit Search Depth");
  uint32_t BitWidth = DemandedMask.getBitWidth();
  Type *VTy = V->getType();
  assert(
      (!VTy->isIntOrIntVectorTy() || VTy->getScalarSizeInBits() == BitWidth) &&
      Known.getBitWidth() == BitWidth &&
      "Value *V, DemandedMask and Known must have same BitWidth");

  if (isa<Constant>(V)) {
    computeKnownBits(V, Known, Depth, CxtI);
    return nullptr;
  }
  // ...
  // If there are multiple uses of this value, compute what we can without
  // rewriting the instruction itself.
  if (!I->hasOneUse())
    return SimplifyMultipleUseDemandedBits(I, DemandedMask, Known, Depth, CxtI);

  KnownBits LHSKnown(BitWidth), RHSKnown(BitWidth);
  switch (I->getOpcode()) {
  default:
    computeKnownBits(I, Known, Depth, CxtI);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
        SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnown.Zero, LHSKnown,
                             Depth + 1))
      return I;
    assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
    assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");

    // Output known-0 bits are clear if zero in either the LHS or the RHS.
    APInt IKnownZero = RHSKnown.Zero | LHSKnown.Zero;
    // Output known-1 bits are only known if set in both the LHS and the RHS.
    APInt IKnownOne = RHSKnown.One & LHSKnown.One;

    // If the client is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(IKnownZero | IKnownOne))
      return Constant::getIntegerValue(VTy, IKnownOne);
    // ...
    Known.Zero = std::move(IKnownZero);
    Known.One = std::move(IKnownOne);
    break;
  }
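  // Worked example for the 'and' case (illustrative, assuming i8 values):
  // for V = (X & 0x0F) with DemandedMask = 0xFF, the constant RHS gives
  // RHSKnown.Zero = 0xF0, so the LHS is recursively queried with
  // DemandedMask & ~RHSKnown.Zero = 0x0F -- the high nibble of X can never
  // reach the result, and instructions feeding only those bits may be dropped.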
  case Instruction::Or: {
    // If either the LHS or the RHS are One, the result is One.
    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
        SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnown.One, LHSKnown,
                             Depth + 1))
      return I;
    assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
    assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");

    // Output known-0 bits are only known if clear in both the LHS and the RHS.
    APInt IKnownZero = RHSKnown.Zero & LHSKnown.Zero;
    // Output known-1 bits are set if set in either the LHS or the RHS.
    APInt IKnownOne = RHSKnown.One | LHSKnown.One;

    // If the client is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(IKnownZero | IKnownOne))
      return Constant::getIntegerValue(VTy, IKnownOne);
    // ...
    Known.Zero = std::move(IKnownZero);
    Known.One = std::move(IKnownOne);
    break;
  }
  case Instruction::Xor: {
    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
        SimplifyDemandedBits(I, 0, DemandedMask, LHSKnown, Depth + 1))
      return I;
    assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
    assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the LHS and RHS.
    APInt IKnownZero = (RHSKnown.Zero & LHSKnown.Zero) |
                       (RHSKnown.One & LHSKnown.One);
    // Output known-1 bits are set if set in exactly one of the LHS and RHS.
    APInt IKnownOne = (RHSKnown.Zero & LHSKnown.One) |
                      (RHSKnown.One & LHSKnown.Zero);

    // If the client is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(IKnownZero | IKnownOne))
      return Constant::getIntegerValue(VTy, IKnownOne);
    // ...
    // If all of the demanded bits are known to be zero on one side or the
    // other, turn this into an *inclusive* or:
    //   (A & C1) ^ (B & C2) --> (A & C1) | (B & C2)  iff C1 & C2 == 0
    if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.Zero)) {
      Instruction *Or =
          BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
                                   I->getName());
      return InsertNewInstWith(Or, *I);
    }

    // If all of the demanded bits on one side are known, and all of the set
    // bits on that side are also known to be set on the other side, turn this
    // into an AND, as we know those bits will be cleared:
    //   (X | C1) ^ C2 --> (X | C1) & ~C2  iff (C1 & C2) == C2
    if (DemandedMask.isSubsetOf(RHSKnown.Zero | RHSKnown.One) &&
        RHSKnown.One.isSubsetOf(LHSKnown.One)) {
      Constant *AndC = Constant::getIntegerValue(VTy,
                                                 ~RHSKnown.One & DemandedMask);
      Instruction *And = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
      return InsertNewInstWith(And, *I);
    }
    // ...
    // If our LHS is an 'and' with one use, and any of the bits we are
    // flipping are known to be set, the xor is just resetting those bits to
    // zero: knock the common bits out of both constants.
    Instruction *LHSInst = dyn_cast<Instruction>(I->getOperand(0));
    if (LHSInst && LHSInst->getOpcode() == Instruction::And &&
        LHSInst->hasOneUse() &&
        isa<ConstantInt>(I->getOperand(1)) &&
        isa<ConstantInt>(LHSInst->getOperand(1)) &&
        (LHSKnown.One & RHSKnown.One & DemandedMask) != 0) {
      ConstantInt *AndRHS = cast<ConstantInt>(LHSInst->getOperand(1));
      ConstantInt *XorRHS = cast<ConstantInt>(I->getOperand(1));
      APInt NewMask = ~(LHSKnown.One & RHSKnown.One & DemandedMask);

      Constant *AndC = ConstantInt::get(VTy, NewMask & AndRHS->getValue());
      Instruction *NewAnd = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
      InsertNewInstWith(NewAnd, *I);

      Constant *XorC = ConstantInt::get(VTy, NewMask & XorRHS->getValue());
      Instruction *NewXor = BinaryOperator::CreateXor(NewAnd, XorC);
      return InsertNewInstWith(NewXor, *I);
    }

    Known.Zero = std::move(IKnownZero);
    Known.One = std::move(IKnownOne);
    break;
  }
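  // Worked example for the xor-to-or rewrite above (illustrative, i8 values):
  // for (A & 0xF0) ^ (B & 0x0F), LHSKnown.Zero = 0x0F and RHSKnown.Zero =
  // 0xF0, so every demanded bit is known zero on at least one side; the two
  // operands never share a set bit, and the xor can be replaced by an
  // inclusive or of the same operands.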
  case Instruction::Select: {
    // ...
    if (SimplifyDemandedBits(I, 2, DemandedMask, RHSKnown, Depth + 1) ||
        SimplifyDemandedBits(I, 1, DemandedMask, LHSKnown, Depth + 1))
      return I;
    assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
    assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    // ...
    // A bit is only known if it is known in both the true and false arms.
    Known.One = RHSKnown.One & LHSKnown.One;
    Known.Zero = RHSKnown.Zero & LHSKnown.Zero;
    break;
  }
  case Instruction::ZExt:
  case Instruction::Trunc: {
    // ...
    if (SimplifyDemandedBits(I, 0, InputDemandedMask, InputKnown, Depth + 1))
      return I;
    Known = InputKnown.zextOrTrunc(BitWidth);
    // Any top bits of a zext are known to be zero.
    if (BitWidth > SrcBitWidth)
      Known.Zero.setBitsFrom(SrcBitWidth);
    break;
  }
  case Instruction::BitCast:
    // Only handle bitcasts between vectors with the same element count.
    if (DstVTy->getNumElements() != SrcVTy->getNumElements())
      return nullptr;
    // ...
    if (SimplifyDemandedBits(I, 0, DemandedMask, Known, Depth + 1))
      return I;
    break;
  case Instruction::SExt: {
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
    APInt InputDemandedBits = DemandedMask.trunc(SrcBitWidth);

    // If any of the sign extended bits are demanded, the sign bit is demanded.
    if (DemandedMask.getActiveBits() > SrcBitWidth)
      InputDemandedBits.setBit(SrcBitWidth - 1);

    KnownBits InputKnown(SrcBitWidth);
    if (SimplifyDemandedBits(I, 0, InputDemandedBits, InputKnown, Depth + 1))
      return I;

    // If the input sign bit is known zero, or the extended bits are not
    // demanded, convert this into a zero extension.
    if (InputKnown.isNonNegative() ||
        DemandedMask.getActiveBits() <= SrcBitWidth) {
      CastInst *NewCast = new ZExtInst(I->getOperand(0), VTy, I->getName());
      return InsertNewInstWith(NewCast, *I);
    }

    // If the sign bit of the input is known set or clear, we know the top
    // bits of the result.
    Known = InputKnown.sext(BitWidth);
    break;
  }
  case Instruction::Add:
  case Instruction::Sub: {
    // If the high bits of the result are not demanded, neither are the high
    // bits of the operands.
    unsigned NLZ = DemandedMask.countLeadingZeros();
    APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth - NLZ));
    if (SimplifyDemandedBits(I, 0, DemandedFromOps, LHSKnown, Depth + 1) ||
        SimplifyDemandedBits(I, 1, DemandedFromOps, RHSKnown, Depth + 1)) {
      // ...
      return I;
    }
    // ...
    // Otherwise just compute the known bits of the result.
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    Known = KnownBits::computeForAddSub(I->getOpcode() == Instruction::Add,
                                        NSW, LHSKnown, RHSKnown);
    break;
  }
  case Instruction::Shl: {
    const APInt *SA;
    if (match(I->getOperand(1), m_APInt(SA))) {
      const APInt *ShrAmt;
      if (match(I->getOperand(0), m_Shr(m_Value(), m_APInt(ShrAmt))))
        if (Instruction *Shr = dyn_cast<Instruction>(I->getOperand(0)))
          if (Value *R = simplifyShrShlDemandedBits(Shr, *ShrAmt, I, *SA,
                                                    DemandedMask, Known))
            return R;

      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth - 1);
      APInt DemandedMaskIn(DemandedMask.lshr(ShiftAmt));

      // If the shift is NUW/NSW, it does demand the high bits.
      ShlOperator *IOp = cast<ShlOperator>(I);
      if (IOp->hasNoSignedWrap())
        DemandedMaskIn.setHighBits(ShiftAmt + 1);
      else if (IOp->hasNoUnsignedWrap())
        DemandedMaskIn.setHighBits(ShiftAmt);

      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
        return I;
      Known.Zero <<= ShiftAmt;
      Known.One <<= ShiftAmt;
      // The low bits shifted in are known zero.
      if (ShiftAmt)
        Known.Zero.setLowBits(ShiftAmt);
    }
    break;
  }
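  // Worked example for the shl case (illustrative, i8 values): for V = X << 4
  // with DemandedMask = 0xF0, only the low nibble of X is demanded
  // (DemandedMask.lshr(4) = 0x0F); after the recursive call the known bits of
  // X are shifted left by 4, and the vacated low bits become known zero.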
  case Instruction::LShr: {
    // ...
    // Unsigned shift right.
    APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));

    // If the shift is exact, it demands the low bits (and knows they are zero).
    if (cast<LShrOperator>(I)->isExact())
      DemandedMaskIn.setLowBits(ShiftAmt);

    if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
      return I;
    // ...
    break;
  }
  case Instruction::AShr: {
    // If only the low bit is demanded, an ashr can always be replaced by an
    // lshr: the low bit cannot be an input sign bit.
    if (DemandedMask.isOneValue()) {
      Instruction *NewVal = BinaryOperator::CreateLShr(
          I->getOperand(0), I->getOperand(1), I->getName());
      return InsertNewInstWith(NewVal, *I);
    }
    // ...
    // Signed shift right.
    APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
    // If any of the high bits are demanded, the sign bit is demanded too.
    if (DemandedMask.countLeadingZeros() <= ShiftAmt)
      DemandedMaskIn.setSignBit();
    // If the shift is exact, it demands the low bits (and knows they are zero).
    if (cast<AShrOperator>(I)->isExact())
      DemandedMaskIn.setLowBits(ShiftAmt);

    if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
      return I;

    unsigned SignBits = ComputeNumSignBits(I->getOperand(0), Depth + 1, CxtI);

    // Compute the new bits that are at the top now, plus sign bits.
    APInt HighBits(APInt::getHighBitsSet(
        BitWidth, std::min(SignBits + ShiftAmt - 1, BitWidth)));
    Known.Zero.lshrInPlace(ShiftAmt);
    Known.One.lshrInPlace(ShiftAmt);

    // If the input sign bit is known zero, or none of the top bits are
    // demanded, turn this into an unsigned shift right.
    assert(BitWidth > ShiftAmt && "Shift amount not saturated?");
    if (Known.Zero[BitWidth - ShiftAmt - 1] ||
        !DemandedMask.intersects(HighBits)) {
      BinaryOperator *LShr = BinaryOperator::CreateLShr(I->getOperand(0),
                                                        I->getOperand(1));
      LShr->setIsExact(cast<BinaryOperator>(I)->isExact());
      return InsertNewInstWith(LShr, *I);
    } else if (Known.One[BitWidth - ShiftAmt - 1]) { // New bits are known one.
      Known.One |= HighBits;
    }
    break;
  }
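  // Worked example for the ashr case (illustrative): if the recursive query
  // proves the shifted-out sign region is zero -- e.g. X is known to have its
  // top bit clear -- the arithmetic shift is replaced by a logical shift
  // right, which later folds more easily with masks and zero-extensions.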
  case Instruction::UDiv: {
    // UDiv doesn't demand low bits that are zero in the divisor.
    // ...
    // If the division is exact, it does demand the low bits.
    if (cast<UDivOperator>(I)->isExact())
      break;

    unsigned RHSTrailingZeros = SA->countTrailingZeros();
    APInt DemandedMaskIn =
        APInt::getHighBitsSet(BitWidth, BitWidth - RHSTrailingZeros);
    if (SimplifyDemandedBits(I, 0, DemandedMaskIn, LHSKnown, Depth + 1))
      return I;

    // Propagate zero bits from the input.
    Known.Zero.setHighBits(std::min(
        BitWidth, LHSKnown.Zero.countLeadingOnes() + RHSTrailingZeros));
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      // X % -1 demands all the bits, to avoid creating INT_MIN % -1 (undef).
      if (Rem->isMinusOne())
        break;
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        if (DemandedMask.ult(RA)) // srem won't affect demanded bits
          return I->getOperand(0);

        APInt LowBits = RA - 1;
        APInt Mask2 = LowBits | APInt::getSignMask(BitWidth);
        if (SimplifyDemandedBits(I, 0, Mask2, LHSKnown, Depth + 1))
          return I;

        // The low bits of the LHS are unchanged by the srem.
        Known.Zero = LHSKnown.Zero & LowBits;
        Known.One = LHSKnown.One & LowBits;

        // If the LHS is non-negative or has all low bits zero, the upper bits
        // are all zero.
        if (LHSKnown.isNonNegative() || LowBits.isSubsetOf(LHSKnown.Zero))
          Known.Zero |= ~LowBits;

        // If the LHS is negative and not all low bits are zero, the upper
        // bits are all one.
        if (LHSKnown.isNegative() && LowBits.intersects(LHSKnown.One))
          Known.One |= ~LowBits;
        break;
      }
    }

    // The sign bit of the result is the LHS's sign bit, except when the
    // remainder is zero.
    if (DemandedMask.isSignBitSet()) {
      computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);
      if (LHSKnown.isNonNegative())
        Known.makeNonNegative();
    }
    break;
  case Instruction::URem: {
    KnownBits Known2(BitWidth);
    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
    if (SimplifyDemandedBits(I, 0, AllOnes, Known2, Depth + 1) ||
        SimplifyDemandedBits(I, 1, AllOnes, Known2, Depth + 1))
      return I;
    // ...
    break;
  }
  case Instruction::Call:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bswap: {
        // If the demanded bits all come from one byte of the bswap result,
        // shift the input byte into place and drop the bswap.
        // (NLZ/NTZ are the demanded mask's leading/trailing zero counts,
        // rounded down to a byte boundary.)
        // ...
        if (BitWidth - NLZ - NTZ == 8) {
          unsigned ResultBit = NTZ;
          unsigned InputBit = BitWidth - NTZ - 8;

          // Replace this with a left or right shift that moves the byte into
          // the right place.
          Instruction *NewVal;
          if (InputBit > ResultBit)
            NewVal = BinaryOperator::CreateLShr(II->getArgOperand(0),
                ConstantInt::get(I->getType(), InputBit - ResultBit));
          else
            NewVal = BinaryOperator::CreateShl(II->getArgOperand(0),
                ConstantInt::get(I->getType(), ResultBit - InputBit));
          NewVal->takeName(I);
          return InsertNewInstWith(NewVal, *I);
        }
        break;
      }
      case Intrinsic::fshr:
      case Intrinsic::fshl: {
        const APInt *SA;
        if (!match(I->getOperand(2), m_APInt(SA)))
          break;

        // Normalize to a funnel shift left. APInt shifts of BitWidth are
        // well defined, so no need to special-case zero shifts here.
        uint64_t ShiftAmt = SA->urem(BitWidth);
        if (II->getIntrinsicID() == Intrinsic::fshr)
          ShiftAmt = BitWidth - ShiftAmt;

        APInt DemandedMaskLHS(DemandedMask.lshr(ShiftAmt));
        APInt DemandedMaskRHS(DemandedMask.shl(BitWidth - ShiftAmt));
        if (SimplifyDemandedBits(I, 0, DemandedMaskLHS, LHSKnown, Depth + 1) ||
            SimplifyDemandedBits(I, 1, DemandedMaskRHS, RHSKnown, Depth + 1))
          return I;

        Known.Zero = LHSKnown.Zero.shl(ShiftAmt) |
                     RHSKnown.Zero.lshr(BitWidth - ShiftAmt);
        Known.One = LHSKnown.One.shl(ShiftAmt) |
                    RHSKnown.One.lshr(BitWidth - ShiftAmt);
        break;
      }
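      // Worked example for the funnel-shift case (illustrative, i8, fshl with
      // a shift amount of 3): the result is (LHS << 3) | (RHS >> 5), so with
      // DemandedMask = 0b11111000 the LHS is queried with
      // DemandedMask.lshr(3) = 0b00011111 and the RHS with
      // DemandedMask.shl(5) = 0b00000000, i.e. no bit of the RHS is demanded.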
      // MOVMSK-style intrinsics copy the vector elements' sign bits to the
      // low bits of the result and zero the high bits.
      // ...
        auto Arg = II->getArgOperand(0);
        auto ArgType = cast<VectorType>(Arg->getType());
        ArgWidth = ArgType->getNumElements();
        // ...
      // ... (remainder of SimplifyDemandedUseBits elided)
/// Helper routine of SimplifyDemandedUseBits. It computes Known bits and
/// tries simplifications based on DemandedMask that do not modify the
/// instruction.
Value *InstCombiner::SimplifyMultipleUseDemandedBits(Instruction *I,
                                                     const APInt &DemandedMask,
                                                     KnownBits &Known,
                                                     unsigned Depth,
                                                     Instruction *CxtI) {
  unsigned BitWidth = DemandedMask.getBitWidth();
  Type *ITy = I->getType();
  KnownBits LHSKnown(BitWidth), RHSKnown(BitWidth);

  // Even when the instruction has other users we can still compute the known
  // bits and do simplifications that apply to just this one use.
  switch (I->getOpcode()) {
  case Instruction::And: {
    computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
    computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);
    APInt IKnownZero = RHSKnown.Zero | LHSKnown.Zero;
    APInt IKnownOne = RHSKnown.One & LHSKnown.One;

    // If the client is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(IKnownZero | IKnownOne))
      return Constant::getIntegerValue(ITy, IKnownOne);
    // ...
    Known.Zero = std::move(IKnownZero);
    Known.One = std::move(IKnownOne);
    break;
  }
  case Instruction::Or: {
    computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
    computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);
    APInt IKnownZero = RHSKnown.Zero & LHSKnown.Zero;
    APInt IKnownOne = RHSKnown.One | LHSKnown.One;

    // If the client is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(IKnownZero | IKnownOne))
      return Constant::getIntegerValue(ITy, IKnownOne);
    // ...
    Known.Zero = std::move(IKnownZero);
    Known.One = std::move(IKnownOne);
    break;
  }
  case Instruction::Xor: {
    computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
    computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);
    APInt IKnownZero = (RHSKnown.Zero & LHSKnown.Zero) |
                       (RHSKnown.One & LHSKnown.One);
    APInt IKnownOne = (RHSKnown.Zero & LHSKnown.One) |
                      (RHSKnown.One & LHSKnown.Zero);

    // If the client is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(IKnownZero | IKnownOne))
      return Constant::getIntegerValue(ITy, IKnownOne);
    // ...
    Known.Zero = std::move(IKnownZero);
    Known.One = std::move(IKnownOne);
    break;
  }
  default:
    // Just compute the known bits of the result.
    computeKnownBits(I, Known, Depth, CxtI);
    break;
  }

  return nullptr;
}
/// Helper routine of SimplifyDemandedUseBits. It tries to simplify
/// "E1 = (X lsr C1) << C2", where C1 and C2 are constants, into
/// "E2 = X << (C2 - C1)" or "E2 = X >> (C1 - C2)", depending on the sign of
/// "C2 - C1". The transformation is legal if every bit in which E1 and E2
/// could differ is either known zero or not demanded.
Value *
InstCombiner::simplifyShrShlDemandedBits(Instruction *Shr, const APInt &ShrOp1,
                                         Instruction *Shl, const APInt &ShlOp1,
                                         const APInt &DemandedMask,
                                         KnownBits &Known) {
  if (!ShlOp1 || !ShrOp1)
    return nullptr; // No-op.

  Value *VarX = Shr->getOperand(0);
  Type *Ty = VarX->getType();
  unsigned BitWidth = Ty->getScalarSizeInBits();
  if (ShlOp1.uge(BitWidth) || ShrOp1.uge(BitWidth))
    return nullptr; // Undefined shift amounts.

  unsigned ShlAmt = ShlOp1.getZExtValue();
  unsigned ShrAmt = ShrOp1.getZExtValue();

  // ...
  Known.Zero &= DemandedMask;

  APInt BitMask1(APInt::getAllOnesValue(BitWidth));
  APInt BitMask2(APInt::getAllOnesValue(BitWidth));

  bool isLshr = (Shr->getOpcode() == Instruction::LShr);
  BitMask1 = isLshr ? (BitMask1.lshr(ShrAmt) << ShlAmt) :
                      (BitMask1.ashr(ShrAmt) << ShlAmt);

  if (ShrAmt <= ShlAmt) {
    BitMask2 <<= (ShlAmt - ShrAmt);
  } else {
    BitMask2 = isLshr ? BitMask2.lshr(ShrAmt - ShlAmt) :
                        BitMask2.ashr(ShrAmt - ShlAmt);
  }

  // Check that the bits in which the two expressions can differ are not
  // demanded.
  if ((BitMask1 & DemandedMask) == (BitMask2 & DemandedMask)) {
    if (ShrAmt == ShlAmt)
      return VarX;

    if (!Shr->hasOneUse())
      return nullptr;

    BinaryOperator *New;
    if (ShrAmt < ShlAmt) {
      Constant *Amt = ConstantInt::get(VarX->getType(), ShlAmt - ShrAmt);
      New = BinaryOperator::CreateShl(VarX, Amt);
      BinaryOperator *Orig = cast<BinaryOperator>(Shl);
      New->setHasNoSignedWrap(Orig->hasNoSignedWrap());
      New->setHasNoUnsignedWrap(Orig->hasNoUnsignedWrap());
    } else {
      Constant *Amt = ConstantInt::get(VarX->getType(), ShrAmt - ShlAmt);
      New = isLshr ? BinaryOperator::CreateLShr(VarX, Amt) :
                     BinaryOperator::CreateAShr(VarX, Amt);
      if (cast<BinaryOperator>(Shr)->isExact())
        New->setIsExact(true);
    }

    return InsertNewInstWith(New, *Shl);
  }

  return nullptr;
}
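// Worked example (illustrative, i8): for E = (X lshr 3) shl 3 the two sides
// differ from plain X only in the low three bits, which the shl leaves as
// zero. If DemandedMask has none of those low bits set, the whole expression
// can be replaced by X; with unequal shift amounts the pair collapses to a
// single shift by the difference.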
/// Implement SimplifyDemandedVectorElts for amdgcn buffer and image
/// intrinsics: unused result channels can be removed by shrinking the dmask
/// and the vector return type of the call.
Value *InstCombiner::simplifyAMDGCNMemoryIntrinsicDemanded(IntrinsicInst *II,
                                                           APInt DemandedElts,
                                                           int DMaskIdx) {
  // ...
  bool TFELWEEnabled = false;
  // ...
  // Compute the new dmask: keep a bit only if the corresponding loaded
  // channel is actually demanded.
  unsigned NewDMaskVal = 0;
  unsigned OrigLoadIdx = 0;
  for (unsigned SrcIdx = 0; SrcIdx < 4; ++SrcIdx) {
    const unsigned Bit = 1 << SrcIdx;
    if (!!(DMaskVal & Bit)) {
      if (!!DemandedElts[OrigLoadIdx])
        NewDMaskVal |= Bit;
      OrigLoadIdx++;
    }
  }

  if (DMaskVal != NewDMaskVal)
    NewDMask = ConstantInt::get(DMask->getType(), NewDMaskVal);
  // ...
  if (NewNumElts >= VWidth && DemandedElts.isMask()) {
    // Nothing to shrink; at most update the dmask argument in place.
    // ...
    return nullptr;
  }

  // Rebuild the intrinsic with a narrower overloaded return type.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
    // ...
  }
  OverloadTys[0] = NewTy;
  // ...
  Args[DMaskIdx] = NewDMask;

  Builder.SetInsertPoint(II);
  CallInst *NewCall = Builder.CreateCall(NewIntrin, Args);
  NewCall->takeName(II);

  if (NewNumElts == 1) {
    // Re-insert the single loaded element into a vector of the original type.
    // ...
  }

  // Build a shuffle that places the surviving elements back in their original
  // positions, with undef in the dropped lanes.
  unsigned NewLoadIdx = 0;
  for (unsigned OrigLoadIdx = 0; OrigLoadIdx < VWidth; ++OrigLoadIdx) {
    if (!!DemandedElts[OrigLoadIdx])
      EltMask.push_back(NewLoadIdx++);
    else
      EltMask.push_back(NewNumElts);
  }

  Value *Shuffle =
      Builder.CreateShuffleVector(NewCall, UndefValue::get(NewTy), EltMask);
  return Shuffle;
}
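// Worked example (illustrative): an image load with dmask = 0xF returns four
// channels. If only elements 0 and 2 of the result vector are demanded, the
// new dmask becomes 0x5 (bits 0 and 2), the call is rebuilt to return a
// two-element vector, and the shufflevector re-expands it to the original
// width with undef in the dropped lanes.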
/// The specified value produces a vector with any number of elements.
/// DemandedElts contains the set of elements that are actually used by the
/// caller; this method analyzes which elements of the operand are undef and
/// returns that information in UndefElts. If a simpler value can be found it
/// is returned; otherwise this returns null.
Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
                                                APInt &UndefElts,
                                                unsigned Depth) {
  unsigned VWidth = V->getType()->getVectorNumElements();
  APInt EltMask(APInt::getAllOnesValue(VWidth));
  assert((DemandedElts & ~EltMask) == 0 && "Invalid DemandedElts!");

  if (isa<UndefValue>(V)) {
    // If the entire vector is undefined, just return this info.
    UndefElts = EltMask;
    return nullptr;
  }

  if (DemandedElts.isNullValue()) { // If nothing is demanded, provide undef.
    UndefElts = EltMask;
    return UndefValue::get(V->getType());
  }

  UndefElts = 0;

  if (auto *C = dyn_cast<Constant>(V)) {
    // Rebuild the constant with undef in the non-demanded lanes.
    Type *EltTy = cast<VectorType>(V->getType())->getElementType();
    Constant *Undef = UndefValue::get(EltTy);
    SmallVector<Constant *, 16> Elts;
    for (unsigned i = 0; i != VWidth; ++i) {
      if (!DemandedElts[i]) { // If not demanded, set to undef.
        Elts.push_back(Undef);
        UndefElts.setBit(i);
        continue;
      }

      Constant *Elt = C->getAggregateElement(i);
      if (!Elt) return nullptr;

      if (isa<UndefValue>(Elt)) { // Already undef.
        Elts.push_back(Undef);
        UndefElts.setBit(i);
      } else {
        Elts.push_back(Elt);
      }
    }

    // If we changed the constant, return it.
    Constant *NewCV = ConstantVector::get(Elts);
    return NewCV != C ? NewCV : nullptr;
  }
  // If multiple users are using the root value, proceed conservatively,
  // assuming that all elements are needed.
  if (!V->hasOneUse()) {
    if (Depth != 0)
      return nullptr;
    DemandedElts = EltMask;
  }

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return nullptr; // Only analyze instructions.

  bool MadeChange = false;
  auto simplifyAndSetOp = [&](Instruction *Inst, unsigned OpNum,
                              APInt Demanded, APInt &Undef) {
    auto *II = dyn_cast<IntrinsicInst>(Inst);
    Value *Op = II ? II->getArgOperand(OpNum) : Inst->getOperand(OpNum);
    if (Value *V = SimplifyDemandedVectorElts(Op, Demanded,
                                              Undef, Depth + 1)) {
      if (II)
        II->setArgOperand(OpNum, V);
      else
        Inst->setOperand(OpNum, V);
      MadeChange = true;
    }
  };

  APInt UndefElts2(VWidth, 0);
  APInt UndefElts3(VWidth, 0);
  switch (I->getOpcode()) {
  case Instruction::InsertElement: {
    // If the index is not a constant we don't know which element is
    // overwritten, so demand exactly what we produce.
    ConstantInt *Idx = dyn_cast<ConstantInt>(I->getOperand(2));
    if (!Idx) {
      simplifyAndSetOp(I, 0, DemandedElts, UndefElts2);
      break;
    }

    // The inserted element overwrites whatever was there, so the input
    // demanded set is simpler than the output set.
    unsigned IdxNo = Idx->getZExtValue();
    APInt PreInsertDemandedElts = DemandedElts;
    if (IdxNo < VWidth)
      PreInsertDemandedElts.clearBit(IdxNo);

    simplifyAndSetOp(I, 0, PreInsertDemandedElts, UndefElts);

    // If the inserted element is not demanded, remove the insertelement.
    if (IdxNo >= VWidth || !DemandedElts[IdxNo])
      return I->getOperand(0);

    // The inserted element is defined.
    UndefElts.clearBit(IdxNo);
    break;
  }
  case Instruction::ShuffleVector: {
    ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(I);
    unsigned LHSVWidth =
        Shuffle->getOperand(0)->getType()->getVectorNumElements();
    APInt LeftDemanded(LHSVWidth, 0), RightDemanded(LHSVWidth, 0);
    for (unsigned i = 0; i < VWidth; i++) {
      if (DemandedElts[i]) {
        unsigned MaskVal = Shuffle->getMaskValue(i);
        if (MaskVal != -1u) {
          assert(MaskVal < LHSVWidth * 2 &&
                 "shufflevector mask index out of range!");
          if (MaskVal < LHSVWidth)
            LeftDemanded.setBit(MaskVal);
          else
            RightDemanded.setBit(MaskVal - LHSVWidth);
        }
      }
    }

    APInt LHSUndefElts(LHSVWidth, 0);
    simplifyAndSetOp(I, 0, LeftDemanded, LHSUndefElts);

    APInt RHSUndefElts(LHSVWidth, 0);
    simplifyAndSetOp(I, 1, RightDemanded, RHSUndefElts);

    bool NewUndefElts = false;
    unsigned LHSIdx = -1u, LHSValIdx = -1u;
    unsigned RHSIdx = -1u, RHSValIdx = -1u;
    bool LHSUniform = true;
    bool RHSUniform = true;
    for (unsigned i = 0; i < VWidth; i++) {
      unsigned MaskVal = Shuffle->getMaskValue(i);
      if (MaskVal == -1u) {
        UndefElts.setBit(i);
      } else if (!DemandedElts[i]) {
        NewUndefElts = true;
        UndefElts.setBit(i);
      } else if (MaskVal < LHSVWidth) {
        if (LHSUndefElts[MaskVal]) {
          NewUndefElts = true;
          UndefElts.setBit(i);
        } else {
          LHSIdx = LHSIdx == -1u ? i : LHSVWidth;
          LHSValIdx = LHSValIdx == -1u ? MaskVal : LHSVWidth;
          LHSUniform = LHSUniform && (MaskVal == i);
        }
      } else {
        if (RHSUndefElts[MaskVal - LHSVWidth]) {
          NewUndefElts = true;
          UndefElts.setBit(i);
        } else {
          RHSIdx = RHSIdx == -1u ? i : LHSVWidth;
          RHSValIdx = RHSValIdx == -1u ? MaskVal - LHSVWidth : LHSVWidth;
          RHSUniform = RHSUniform && (MaskVal - LHSVWidth == i);
        }
      }
    }

    // Try to transform a shuffle that uses a single element of a constant
    // vector into an insertelement:
    //   shufflevector V, C, <v1, v2, .., ci, .., vm>
    //     --> insertelement V, C[ci], ci-n
    if (LHSVWidth == Shuffle->getType()->getNumElements()) {
      Value *Op = nullptr;
      Constant *Value = nullptr;
      unsigned Idx = -1u;

      // Find a constant vector with a single used element (LHS or RHS).
      if (LHSIdx < LHSVWidth && RHSUniform) {
        if (auto *CV = dyn_cast<ConstantVector>(Shuffle->getOperand(0))) {
          Op = Shuffle->getOperand(1);
          Value = CV->getOperand(LHSValIdx);
          Idx = LHSIdx;
        }
      }
      if (RHSIdx < LHSVWidth && LHSUniform) {
        if (auto *CV = dyn_cast<ConstantVector>(Shuffle->getOperand(1))) {
          Op = Shuffle->getOperand(0);
          Value = CV->getOperand(RHSValIdx);
          Idx = RHSIdx;
        }
      }
      // Found a constant vector with a single element - convert.
      if (Op && Value) {
        Instruction *New = InsertElementInst::Create(
            Op, Value,
            ConstantInt::get(Type::getInt32Ty(I->getContext()), Idx),
            Shuffle->getName() + ".shuffle");
        InsertNewInstWith(New, *Shuffle);
        return New;
      }
    }

    if (NewUndefElts) {
      // Add additional discovered undefs to the shuffle mask.
      SmallVector<Constant *, 16> Elts;
      for (unsigned i = 0; i < VWidth; ++i) {
        if (UndefElts[i])
          Elts.push_back(UndefValue::get(Type::getInt32Ty(I->getContext())));
        else
          Elts.push_back(ConstantInt::get(Type::getInt32Ty(I->getContext()),
                                          Shuffle->getMaskValue(i)));
      }
      I->setOperand(2, ConstantVector::get(Elts));
      MadeChange = true;
    }
    break;
  }
  case Instruction::Select: {
    // If this is a vector select, try to transform the select condition based
    // on the current demanded elements.
    SelectInst *Sel = cast<SelectInst>(I);
    if (Sel->getCondition()->getType()->isVectorTy())
      simplifyAndSetOp(I, 0, DemandedElts, UndefElts);

    // Next, see if we can transform the arms of the select.
    APInt DemandedLHS(DemandedElts), DemandedRHS(DemandedElts);
    if (auto *CV = dyn_cast<ConstantVector>(Sel->getCondition())) {
      for (unsigned i = 0; i < VWidth; i++) {
        // isNullValue() always returns false when called on a ConstantExpr.
        Constant *CElt = CV->getAggregateElement(i);
        if (isa<ConstantExpr>(CElt))
          continue;
        if (CElt->isNullValue())
          DemandedLHS.clearBit(i);
        else
          DemandedRHS.clearBit(i);
      }
    }

    simplifyAndSetOp(I, 1, DemandedLHS, UndefElts2);
    simplifyAndSetOp(I, 2, DemandedRHS, UndefElts3);

    // Output elements are undefined only if the element from each arm is
    // undefined.
    UndefElts = UndefElts2 & UndefElts3;
    break;
  }
  case Instruction::BitCast: {
    // Vector->vector casts only.
    VectorType *VTy = dyn_cast<VectorType>(I->getOperand(0)->getType());
    if (!VTy) break;
    unsigned InVWidth = VTy->getNumElements();
    APInt InputDemandedElts(InVWidth, 0);
    UndefElts2 = APInt(InVWidth, 0);
    unsigned Ratio;

    if (VWidth == InVWidth) {
      // Converting e.g. <4 x i32> -> <4 x f32>: demand the same elements as
      // are demanded of us.
      Ratio = 1;
      InputDemandedElts = DemandedElts;
    } else if ((VWidth % InVWidth) == 0) {
      // The output has more (narrower) elements than the input: an input
      // element is live if any of its output elements are live.
      Ratio = VWidth / InVWidth;
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
        if (DemandedElts[OutIdx])
          InputDemandedElts.setBit(OutIdx / Ratio);
    } else if ((InVWidth % VWidth) == 0) {
      // The input has more (narrower) elements than the output: an input
      // element is live if its output element is live.
      Ratio = InVWidth / VWidth;
      for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
        if (DemandedElts[InIdx / Ratio])
          InputDemandedElts.setBit(InIdx);
    } else {
      // Unsupported so far.
      break;
    }

    simplifyAndSetOp(I, 0, InputDemandedElts, UndefElts2);

    if (VWidth == InVWidth) {
      UndefElts = UndefElts2;
    } else if ((VWidth % InVWidth) == 0) {
      // An output element is undef if the corresponding input element is.
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
        if (UndefElts2[OutIdx / Ratio])
          UndefElts.setBit(OutIdx);
    } else if ((InVWidth % VWidth) == 0) {
      // An output element is undef only if all of its input sub-elements are.
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx) {
        APInt SubUndef = UndefElts2.lshr(OutIdx * Ratio).zextOrTrunc(Ratio);
        if (SubUndef.countPopulation() == Ratio)
          UndefElts.setBit(OutIdx);
      }
    } else {
      llvm_unreachable("Unimp");
    }
    break;
  }
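  // Worked example for the bitcast case (illustrative): casting <2 x i64> to
  // <4 x i32> gives Ratio = 2, so demanding output elements 2 or 3 marks
  // input element 1 as demanded; in the reverse direction an output element
  // is only undef when every one of its Ratio input sub-elements is undef.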
  case Instruction::FPTrunc:
  case Instruction::FPExt:
    simplifyAndSetOp(I, 0, DemandedElts, UndefElts);
    break;
  case Instruction::Call: {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
    if (!II) break;
    switch (II->getIntrinsicID()) {
    case Intrinsic::x86_xop_vfrcz_ss:
    case Intrinsic::x86_xop_vfrcz_sd:
      // These zero the upper elements rather than passing them through, so if
      // the low element is not demanded the whole result is zero.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return ConstantAggregateZero::get(II->getType());
      }
      // Only the lower element is used.
      DemandedElts = 1;
      simplifyAndSetOp(II, 0, DemandedElts, UndefElts);
      // Only the lower element can be undef; the high elements are zero.
      UndefElts = UndefElts[0];
      break;

    // Unary scalar-as-vector operations that work column-wise.
    // ...
      simplifyAndSetOp(II, 0, DemandedElts, UndefElts);
      // If the lowest element of the scalar op isn't used, use Arg0 directly.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }
      break;

    // Binary scalar-as-vector operations: the high elements come from
    // operand 0, the low element is a function of both operands.
    // ...
      simplifyAndSetOp(II, 0, DemandedElts, UndefElts);
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }
      // Only the lower element of operand 1 is used.
      DemandedElts = 1;
      simplifyAndSetOp(II, 1, DemandedElts, UndefElts2);
      // The lower element is undefined only if both lower elements are.
      if (!UndefElts2[0])
        UndefElts.clearBit(0);
      break;

    // Binary scalar-as-vector operations where the low element of the result
    // comes from operand 1 (e.g. the round.ss/sd family).
    // ...
      APInt DemandedElts2 = DemandedElts;
      DemandedElts2.clearBit(0);
      simplifyAndSetOp(II, 0, DemandedElts2, UndefElts);
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }
      DemandedElts = 1;
      simplifyAndSetOp(II, 1, DemandedElts, UndefElts2);
      // High undef elements come from operand 0, the low one from operand 1.
      UndefElts.clearBit(0);
      UndefElts |= UndefElts2[0];
      break;

    // Three-input scalar-as-vector operations (FMA-style): the high elements
    // come from operand 0, the low element depends on all three inputs.
    // ...
      simplifyAndSetOp(II, 0, DemandedElts, UndefElts);
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }
      DemandedElts = 1;
      simplifyAndSetOp(II, 1, DemandedElts, UndefElts2);
      simplifyAndSetOp(II, 2, DemandedElts, UndefElts3);
      // The lower element is undefined only if all three lower elements are.
      if (!UndefElts2[0] || !UndefElts3[0])
        UndefElts.clearBit(0);
      break;
    // PACKSS / PACKUS intrinsics: each 128-bit lane packs the elements of the
    // first operand and then the second.
    // ...
      auto *Ty0 = II->getArgOperand(0)->getType();
      unsigned InnerVWidth = Ty0->getVectorNumElements();
      assert(VWidth == (InnerVWidth * 2) && "Unexpected input size");

      unsigned NumLanes = Ty0->getPrimitiveSizeInBits() / 128;
      unsigned VWidthPerLane = VWidth / NumLanes;
      unsigned InnerVWidthPerLane = InnerVWidth / NumLanes;

      // Map each demanded result element back to the operand and lane it is
      // packed from, then query the operands with those demanded sets.
      for (int OpNum = 0; OpNum != 2; ++OpNum) {
        APInt OpDemandedElts(InnerVWidth, 0);
        for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
          unsigned LaneIdx = Lane * VWidthPerLane;
          for (unsigned Elt = 0; Elt != InnerVWidthPerLane; ++Elt) {
            unsigned Idx = LaneIdx + Elt + InnerVWidthPerLane * OpNum;
            if (DemandedElts[Idx])
              OpDemandedElts.setBit((Lane * InnerVWidthPerLane) + Elt);
          }
        }

        // Demand elements from the operand.
        APInt OpUndefElts(InnerVWidth, 0);
        simplifyAndSetOp(II, OpNum, OpDemandedElts, OpUndefElts);

        // Pack the operand's undef elements into the result's undef lanes.
        OpUndefElts = OpUndefElts.zext(VWidth);
        for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
          APInt LaneElts = OpUndefElts.lshr(InnerVWidthPerLane * Lane);
          LaneElts = LaneElts.getLoBits(InnerVWidthPerLane);
          LaneElts <<= InnerVWidthPerLane * (2 * Lane + OpNum);
          UndefElts |= LaneElts;
        }
      }
      break;
    // PSHUFB / VPERMILVAR / VPERMV: only the shuffle-control operand
    // (operand 1) has a per-element correspondence with the result.
    // ...
      simplifyAndSetOp(II, 1, DemandedElts, UndefElts);
      break;

    // SSE4A instructions leave the upper 64 bits of the 128-bit result in an
    // undefined state.
    // ...
      UndefElts.setHighBits(VWidth / 2);
      break;

    case Intrinsic::amdgcn_buffer_load:
    case Intrinsic::amdgcn_buffer_load_format:
      return simplifyAMDGCNMemoryIntrinsicDemanded(II, DemandedElts);
    default: {
      if (getAMDGPUImageDMaskIntrinsic(II->getIntrinsicID()))
        return simplifyAMDGCNMemoryIntrinsicDemanded(II, DemandedElts, 0);
      break;
    }
    }
    break;
  }
  } // switch on Opcode

  // For simple binary operators, the demanded elements of the operands match
  // the demanded elements of the result.
  BinaryOperator *BO;
  if (match(I, m_BinOp(BO)) && !BO->isIntDivRem() && !BO->isShift()) {
    simplifyAndSetOp(I, 0, DemandedElts, UndefElts);
    simplifyAndSetOp(I, 1, DemandedElts, UndefElts2);

    // Any change to an instruction with potential poison must clear those
    // flags, since the constraints may no longer hold.
    if (MadeChange)
      BO->dropPoisonGeneratingFlags();

    // Output elements are undefined only if they are undefined in both
    // operands; consider undef & 0, which is known zero, not undef.
    UndefElts &= UndefElts2;
  }

  return MadeChange ? I : nullptr;
}