using namespace llvm;

#define DEBUG_TYPE "msan"

static cl::opt<int> ClTrackOrigins("msan-track-origins",
    cl::desc("Track origins (allocation sites) of poisoned memory"),
    cl::Hidden, cl::init(0));
static cl::opt<bool> ClKeepGoing("msan-keep-going",
    cl::desc("keep going after reporting a UMR"), cl::Hidden, cl::init(false));
static cl::opt<bool> ClPoisonStack("msan-poison-stack",
    cl::desc("poison uninitialized stack variables"), cl::Hidden, cl::init(true));
static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
    cl::desc("poison uninitialized stack variables with a call"),
    cl::Hidden, cl::init(false));
static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
    cl::desc("poison uninitialized stack variables with the given pattern"),
    cl::Hidden, cl::init(0xff));
static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
    cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
    cl::Hidden, cl::init(true));
static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
    cl::desc("exact handling of relational integer ICmp"),
    cl::Hidden, cl::init(false));
static cl::opt<bool> ClHandleAsmConservative(
    "msan-handle-asm-conservative",
    cl::desc("conservative handling of inline assembly"),
    cl::Hidden, cl::init(true));
static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
    cl::desc("report accesses through a pointer which has poisoned shadow"),
    cl::Hidden, cl::init(true));
static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
    cl::desc("print out instructions with default strict semantics"),
    cl::Hidden, cl::init(false));
static cl::opt<int> ClInstrumentationWithCallThreshold(
    "msan-instrumentation-with-call-threshold",
    cl::desc(
        "If the function being instrumented requires more than "
        "this number of checks and origin stores, use callbacks instead of "
        "inline checks (-1 means never use callbacks)."),
    cl::Hidden, cl::init(3500));
static cl::opt<bool> ClEnableKmsan("msan-kernel",
    cl::desc("Enable KernelMemorySanitizer instrumentation"),
    cl::Hidden, cl::init(false));
static cl::opt<bool> ClCheckConstantShadow("msan-check-constant-shadow",
    cl::desc("Insert checks for constant shadow values"),
    cl::Hidden, cl::init(false));
static cl::opt<bool> ClWithComdat("msan-with-comdat",
    cl::desc("Place MSan constructors in comdat sections"),
    cl::Hidden, cl::init(false));
static cl::opt<unsigned long long> ClAndMask("msan-and-mask",
    cl::desc("Define custom MSan AndMask"), cl::Hidden, cl::init(0));
static cl::opt<unsigned long long> ClXorMask("msan-xor-mask",
    cl::desc("Define custom MSan XorMask"), cl::Hidden, cl::init(0));
static cl::opt<unsigned long long> ClShadowBase("msan-shadow-base",
    cl::desc("Define custom MSan ShadowBase"), cl::Hidden, cl::init(0));
static cl::opt<unsigned long long> ClOriginBase("msan-origin-base",
    cl::desc("Define custom MSan OriginBase"), cl::Hidden, cl::init(0));
struct MemoryMapParams {
  uint64_t AndMask;
  uint64_t XorMask;
  uint64_t ShadowBase;
  uint64_t OriginBase;
};

struct PlatformMemoryMapParams {
  const MemoryMapParams *bits32;
  const MemoryMapParams *bits64;
};
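// A standalone sketch (not part of the pass) of how a MemoryMapParams entry is
// used: the application address is masked, xor-ed and offset to reach its
// shadow and origin cells, mirroring the getShadowOriginPtrUserspace() logic
// later in this file. The constants in main() are made up, not a real platform
// layout.
#include <cstdint>
#include <cstdio>

struct ExampleMapParams {
  uint64_t AndMask, XorMask, ShadowBase, OriginBase;
};

static uint64_t shadowAddr(const ExampleMapParams &P, uint64_t Addr) {
  uint64_t Off = Addr;
  if (P.AndMask)
    Off &= ~P.AndMask;   // drop the bits that distinguish application memory
  if (P.XorMask)
    Off ^= P.XorMask;    // relocate into the shadow region
  return Off + P.ShadowBase;
}

static uint64_t originAddr(const ExampleMapParams &P, uint64_t Addr) {
  // Origins live in a parallel region and are kept 4-byte aligned.
  return ((shadowAddr(P, Addr) - P.ShadowBase) + P.OriginBase) & ~uint64_t(3);
}

int main() {
  ExampleMapParams P = {0, 0x500000000000ULL, 0, 0x100000000000ULL}; // made up
  uint64_t A = 0x7f0012345678ULL;
  std::printf("app %llx -> shadow %llx, origin %llx\n",
              (unsigned long long)A, (unsigned long long)shadowAddr(P, A),
              (unsigned long long)originAddr(P, A));
}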
#ifdef MSAN_LINUX_X86_64_OLD_MAPPING

class MemorySanitizer {
  MemorySanitizer(Module &M, int TrackOrigins = 0, bool Recover = false,
                  bool EnableKmsan = false) {
    this->CompileKernel =
        ClEnableKmsan.getNumOccurrences() > 0 ? ClEnableKmsan : EnableKmsan;
    this->TrackOrigins = this->CompileKernel ? 2 : TrackOrigins;
    this->Recover = ClKeepGoing.getNumOccurrences() > 0
                        ? ClKeepGoing
                        : (this->CompileKernel | Recover);
  }

  MemorySanitizer(MemorySanitizer &&) = delete;
  MemorySanitizer &operator=(MemorySanitizer &&) = delete;
  MemorySanitizer(const MemorySanitizer &) = delete;
  MemorySanitizer &operator=(const MemorySanitizer &) = delete;
  friend struct MemorySanitizerVisitor;
  friend struct VarArgAMD64Helper;
  friend struct VarArgMIPS64Helper;
  friend struct VarArgAArch64Helper;
  friend struct VarArgPowerPC64Helper;

  void initializeModule(Module &M);
  void initializeCallbacks(Module &M);
  void createKernelApi(Module &M);
  void createUserspaceApi(Module &M);
  Value *ParamOriginTLS;
  Value *RetvalOriginTLS;
  Value *VAArgOriginTLS;
  Value *VAArgOverflowSizeTLS;

  bool CallbacksInitialized = false;

  Value *MsanSetAllocaOrigin4Fn;
  Value *MsanPoisonStackFn;
  Value *MsanChainOriginFn;
  Value *MemmoveFn, *MemcpyFn, *MemsetFn;
  Value *MsanGetContextStateFn;
  Value *MsanPoisonAllocaFn, *MsanUnpoisonAllocaFn;
  Value *MsanMetadataPtrForLoadN, *MsanMetadataPtrForStoreN;
  Value *MsanMetadataPtrForLoad_1_8[4];
  Value *MsanMetadataPtrForStore_1_8[4];
  Value *MsanInstrumentAsmStoreFn;

  const MemoryMapParams *MapParams;
  MemoryMapParams CustomMapParams;

  MDNode *OriginStoreWeights;
struct MemorySanitizerLegacyPass : public FunctionPass {
  MemorySanitizerLegacyPass(int TrackOrigins = 0, bool Recover = false,
                            bool EnableKmsan = false)
      : FunctionPass(ID), TrackOrigins(TrackOrigins), Recover(Recover),
        EnableKmsan(EnableKmsan) {}

  StringRef getPassName() const override {
    return "MemorySanitizerLegacyPass";
  }

  bool runOnFunction(Function &F) override {
    return MSan->sanitizeFunction(
        F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI());
  }

  bool doInitialization(Module &M) override;
  MemorySanitizer Msan(*F.getParent(), TrackOrigins, Recover, EnableKmsan);

INITIALIZE_PASS_BEGIN(MemorySanitizerLegacyPass, "msan",
                      "MemorySanitizer: detects uninitialized reads.", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(MemorySanitizerLegacyPass, "msan",
                    "MemorySanitizer: detects uninitialized reads.", false,
                    false)

FunctionPass *llvm::createMemorySanitizerLegacyPassPass(int TrackOrigins,
                                                        bool Recover,
                                                        bool CompileKernel) {
  return new MemorySanitizerLegacyPass(TrackOrigins, Recover, CompileKernel);
}
void MemorySanitizer::createKernelApi(Module &M) {
  RetvalOriginTLS = nullptr;
  ParamOriginTLS = nullptr;
  VAArgOriginTLS = nullptr;
  VAArgOverflowSizeTLS = nullptr;

      "__msan_get_context_state",

  for (int ind = 0, size = 1; ind < 4; ind++, size <<= 1) {
    std::string name_load =
        "__msan_metadata_ptr_for_load_" + std::to_string(size);
    std::string name_store =
        "__msan_metadata_ptr_for_store_" + std::to_string(size);

      "__msan_metadata_ptr_for_load_n", RetTy,
      "__msan_metadata_ptr_for_store_n", RetTy,

      nullptr, Name, nullptr,
void MemorySanitizer::createUserspaceApi(Module &M) {
  StringRef WarningFnName = Recover ? "__msan_warning"
                                    : "__msan_warning_noreturn";
  VAArgOverflowSizeTLS =

    unsigned AccessSize = 1 << AccessSizeIndex;
    std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);

    FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
void MemorySanitizer::initializeCallbacks(Module &M) {
  if (CallbacksInitialized)
    return;

  MsanInstrumentAsmStoreFn =

  createUserspaceApi(M);

  CallbacksInitialized = true;
Value *MemorySanitizer::getKmsanShadowOriginAccessFn(bool isStore, int size) {
  Value **Fns =
      isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8;
void MemorySanitizer::initializeModule(Module &M) {
  bool ShadowPassed = ClShadowBase.getNumOccurrences() > 0;
  bool OriginPassed = ClOriginBase.getNumOccurrences() > 0;
  if (ShadowPassed || OriginPassed) {
    MapParams = &CustomMapParams;

  switch (TargetTriple.getOS()) {
    switch (TargetTriple.getArch()) {
    switch (TargetTriple.getArch()) {
    switch (TargetTriple.getArch()) {

  if (!CompileKernel) {
    std::tie(MsanCtorFunction, std::ignore) =

        IRB.getInt32(TrackOrigins), "__msan_track_origins");
        IRB.getInt32(Recover), "__msan_keep_going");

bool MemorySanitizerLegacyPass::doInitialization(Module &M) {
  MSan.emplace(M, TrackOrigins, Recover, EnableKmsan);
struct VarArgHelper {
  virtual ~VarArgHelper() = default;
  virtual void visitVACopyInst(VACopyInst &I) = 0;
  virtual void finalizeInstrumentation() = 0;
};

struct MemorySanitizerVisitor;

static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                        MemorySanitizerVisitor &Visitor);

static unsigned TypeSizeToSizeIndex(unsigned TypeSize) {
  if (TypeSize <= 8)
    return 0;
  return Log2_32_Ceil((TypeSize + 7) / 8);
}
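// A standalone illustration of the index computed above: TypeSize is in bits,
// and sizes of up to 8, 16, 32 and 64 bits map to indexes 0..3. That is how the
// MsanMetadataPtrFor{Load,Store}_1_8[4] arrays and the per-size
// __msan_maybe_warning_{1,2,4,8}-style callbacks are selected.
#include <cassert>
#include <cstdint>

static unsigned log2Ceil(uint32_t V) {     // stand-in for llvm::Log2_32_Ceil
  unsigned R = 0;
  while ((1u << R) < V)
    ++R;
  return R;
}

static unsigned typeSizeToSizeIndex(unsigned TypeSizeInBits) {
  if (TypeSizeInBits <= 8)
    return 0;
  return log2Ceil((TypeSizeInBits + 7) / 8);
}

int main() {
  assert(typeSizeToSizeIndex(8) == 0);    // i8  -> 1-byte callback
  assert(typeSizeToSizeIndex(16) == 1);   // i16 -> 2-byte callback
  assert(typeSizeToSizeIndex(32) == 2);   // i32 -> 4-byte callback
  assert(typeSizeToSizeIndex(64) == 3);   // i64 -> 8-byte callback
}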
1008 struct MemorySanitizerVisitor :
public InstVisitor<MemorySanitizerVisitor> {
1010 MemorySanitizer &MS;
1013 std::unique_ptr<VarArgHelper> VAHelper;
1020 bool PropagateShadow;
1023 bool CheckReturnValue;
1025 struct ShadowOriginAndInsertPoint {
1031 : Shadow(S), Origin(O), OrigIns(I) {}
1036 MemorySanitizerVisitor(
Function &F, MemorySanitizer &MS,
1040 InsertChecks = SanitizeFunction;
1041 PropagateShadow = SanitizeFunction;
1046 CheckReturnValue = SanitizeFunction && (F.
getName() ==
"main");
1049 if (MS.CompileKernel)
1050 ActualFnStart = insertKmsanPrologue(F);
1055 <<
"MemorySanitizer is not inserting checks into '" 1060 if (MS.TrackOrigins <= 1)
return V;
1061 return IRB.
CreateCall(MS.MsanChainOriginFn, V);
1067 if (IntptrSize == kOriginSize)
return Origin;
1068 assert(IntptrSize == kOriginSize * 2);
1075 unsigned Size,
unsigned Alignment) {
1079 assert(IntptrAlignment >= kMinOriginAlignment);
1080 assert(IntptrSize >= kOriginSize);
1083 unsigned CurrentAlignment = Alignment;
1084 if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
1085 Value *IntptrOrigin = originToIntptr(IRB, Origin);
1086 Value *IntptrOriginPtr =
1088 for (
unsigned i = 0; i < Size / IntptrSize; ++i) {
1093 CurrentAlignment = IntptrAlignment;
1097 for (
unsigned i = Ofs; i < (Size + kOriginSize - 1) / kOriginSize; ++i) {
1106 Value *OriginPtr,
unsigned Alignment,
bool AsCall) {
1108 unsigned OriginAlignment =
std::max(kMinOriginAlignment, Alignment);
1111 paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
1114 Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
1115 Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow);
1116 if (ConstantShadow) {
1118 paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
1123 unsigned TypeSizeInBits =
1127 Value *Fn = MS.MaybeStoreOriginFn[SizeIndex];
1129 ConvertedShadow, IRB.
getIntNTy(8 * (1 << SizeIndex)));
1135 ConvertedShadow, getCleanShadow(ConvertedShadow),
"_mscmp");
1139 paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,
1145 void materializeStores(
bool InstrumentWithCalls) {
1148 Value *Val =
SI->getValueOperand();
1149 Value *Addr =
SI->getPointerOperand();
1150 Value *Shadow =
SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
1151 Value *ShadowPtr, *OriginPtr;
1153 unsigned Alignment =
SI->getAlignment();
1154 unsigned OriginAlignment =
std::max(kMinOriginAlignment, Alignment);
1155 std::tie(ShadowPtr, OriginPtr) =
1156 getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment,
true);
1163 SI->setOrdering(addReleaseOrdering(
SI->getOrdering()));
1165 if (MS.TrackOrigins && !
SI->isAtomic())
1166 storeOrigin(IRB, Addr, Shadow, getOrigin(Val), OriginPtr,
1167 OriginAlignment, InstrumentWithCalls);
1175 if (MS.CompileKernel) {
1178 if (MS.TrackOrigins) {
1193 Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
1196 Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow);
1197 if (ConstantShadow) {
1199 insertWarningFn(IRB, Origin);
1209 Value *Fn = MS.MaybeWarningFn[SizeIndex];
1210 Value *ConvertedShadow2 =
1212 IRB.
CreateCall(Fn, {ConvertedShadow2, MS.TrackOrigins && Origin
1217 getCleanShadow(ConvertedShadow),
"_mscmp");
1220 !MS.Recover, MS.ColdCallWeights);
1223 insertWarningFn(IRB, Origin);
  void materializeChecks(bool InstrumentWithCalls) {
    for (const auto &ShadowData : InstrumentationList) {
      Instruction *OrigIns = ShadowData.OrigIns;
      Value *Shadow = ShadowData.Shadow;
      Value *Origin = ShadowData.Origin;
      materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls);
    }
  }
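// A standalone sketch of what a materialized check does at run time: compare
// the flattened shadow of the value with zero and report a UMR on mismatch.
// When a function would need more checks and origin stores than
// ClInstrumentationWithCallThreshold, the same test is emitted as a call to a
// __msan_maybe_warning_N-style runtime helper instead of inline code. The
// helper names below are illustrative, not the pass's actual emitted IR.
#include <cstdint>
#include <cstdio>

static bool Recover = true;                 // behaves like -msan-keep-going

static void reportUMR(uint32_t OriginId) {  // stand-in for __msan_warning*()
  std::fprintf(stderr, "use-of-uninitialized-value (origin id %u)\n", OriginId);
}

// Inline form of a check: test the flattened shadow of the value.
static void inlineCheck(uint64_t Shadow, uint32_t OriginId) {
  if (Shadow != 0)
    reportUMR(OriginId);
}

// Callback form: the same test behind a call, keeping the instrumented
// function small once the threshold is exceeded.
static void maybeWarning8(uint64_t Shadow, uint32_t OriginId) {
  inlineCheck(Shadow, OriginId);
}

int main() {
  inlineCheck(0, 0);        // fully initialized value: silent
  maybeWarning8(0xff, 42);  // one poisoned byte: reported
  (void)Recover;
}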
1252 MS.VAArgOverflowSizeTLS = IRB.
CreateGEP(
1253 ContextState, {Zero, IRB.
getInt32(4)},
"va_arg_overflow_size");
1256 MS.RetvalOriginTLS =
1276 for (
PHINode *PN : ShadowPHINodes) {
1277 PHINode *PNS = cast<PHINode>(getShadow(PN));
1278 PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) :
nullptr;
1279 size_t NumValues = PN->getNumIncomingValues();
1280 for (
size_t v = 0; v < NumValues; v++) {
1281 PNS->
addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
1282 if (PNO) PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
1286 VAHelper->finalizeInstrumentation();
1289 InstrumentationList.
size() + StoreList.
size() >
1293 materializeChecks(InstrumentWithCalls);
1297 materializeStores(InstrumentWithCalls);
1304 return getShadowTy(V->
getType());
1317 if (
VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
1320 VT->getNumElements());
1322 if (
ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
1324 AT->getNumElements());
1328 for (
unsigned i = 0, n =
ST->getNumElements(); i < n; i++)
1329 Elements.
push_back(getShadowTy(
ST->getElementType(i)));
1340 if (
VectorType *vt = dyn_cast<VectorType>(ty))
1348 Type *NoVecTy = getShadowTyNoVec(Ty);
1349 if (Ty == NoVecTy)
return V;
1360 uint64_t AndMask = MS.MapParams->AndMask;
1365 uint64_t XorMask = MS.MapParams->XorMask;
1377 std::pair<Value *, Value *> getShadowOriginPtrUserspace(
Value *Addr,
1380 unsigned Alignment) {
1381 Value *ShadowOffset = getShadowPtrOffset(Addr, IRB);
1382 Value *ShadowLong = ShadowOffset;
1383 uint64_t ShadowBase = MS.MapParams->ShadowBase;
1384 if (ShadowBase != 0) {
1391 Value *OriginPtr =
nullptr;
1392 if (MS.TrackOrigins) {
1393 Value *OriginLong = ShadowOffset;
1394 uint64_t OriginBase = MS.MapParams->OriginBase;
1395 if (OriginBase != 0)
1398 if (Alignment < kMinOriginAlignment) {
1399 uint64_t
Mask = kMinOriginAlignment - 1;
1406 return std::make_pair(ShadowPtr, OriginPtr);
1409 std::pair<Value *, Value *>
1411 unsigned Alignment,
bool isStore) {
1412 Value *ShadowOriginPtrs;
1416 Value *Getter = MS.getKmsanShadowOriginAccessFn(isStore, Size);
1420 ShadowOriginPtrs = IRB.
CreateCall(Getter, AddrCast);
1423 ShadowOriginPtrs = IRB.
CreateCall(isStore ? MS.MsanMetadataPtrForStoreN
1424 : MS.MsanMetadataPtrForLoadN,
1425 {AddrCast, SizeVal});
1431 return std::make_pair(ShadowPtr, OriginPtr);
1434 std::pair<Value *, Value *> getShadowOriginPtr(
Value *Addr,
IRBuilder<> &IRB,
1438 std::pair<Value *, Value *> ret;
1439 if (MS.CompileKernel)
1440 ret = getShadowOriginPtrKernel(Addr, IRB, ShadowTy, Alignment, isStore);
1442 ret = getShadowOriginPtrUserspace(Addr, IRB, ShadowTy, Alignment);
1461 if (!MS.TrackOrigins)
1480 return MS.RetvalOriginTLS;
1485 assert(!ShadowMap.
count(V) &&
"Values may only have one shadow");
1486 ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
1491 if (!MS.TrackOrigins)
return;
1492 assert(!OriginMap.
count(V) &&
"Values may only have one origin");
1493 LLVM_DEBUG(
dbgs() <<
"ORIGIN: " << *V <<
" ==> " << *Origin <<
"\n");
1494 OriginMap[V] = Origin;
1498 Type *ShadowTy = getShadowTy(OrigTy);
1509 return getCleanShadow(V->
getType());
1515 if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
1517 if (
ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
1519 getPoisonedShadow(AT->getElementType()));
1524 for (
unsigned i = 0, n =
ST->getNumElements(); i < n; i++)
1525 Vals.
push_back(getPoisonedShadow(
ST->getElementType(i)));
1533 Type *ShadowTy = getShadowTy(V);
1536 return getPoisonedShadow(ShadowTy);
1540 Value *getCleanOrigin() {
1549 if (!PropagateShadow)
return getCleanShadow(V);
1551 if (
I->getMetadata(
"nosanitize"))
1552 return getCleanShadow(V);
1554 Value *Shadow = ShadowMap[V];
1556 LLVM_DEBUG(
dbgs() <<
"No shadow: " << *V <<
"\n" << *(
I->getParent()));
1558 assert(Shadow &&
"No shadow for a value");
1562 if (
UndefValue *U = dyn_cast<UndefValue>(V)) {
1563 Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
1564 LLVM_DEBUG(
dbgs() <<
"Undef: " << *U <<
" ==> " << *AllOnes <<
"\n");
1568 if (
Argument *A = dyn_cast<Argument>(V)) {
1570 Value **ShadowPtr = &ShadowMap[V];
1575 unsigned ArgOffset = 0;
1577 for (
auto &FArg : F->
args()) {
1578 if (!FArg.getType()->isSized()) {
1588 Value *
Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
1589 if (FArg.hasByValAttr()) {
1593 unsigned ArgAlign = FArg.getParamAlignment();
1594 if (ArgAlign == 0) {
1598 Value *CpShadowPtr =
1599 getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
1605 EntryIRB.CreateMemSet(
1609 unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
1610 Value *Cpy = EntryIRB.CreateMemCpy(CpShadowPtr, CopyAlign, Base,
1615 *ShadowPtr = getCleanShadow(V);
1619 *ShadowPtr = getCleanShadow(V);
1622 EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
1626 <<
" ARG: " << FArg <<
" ==> " << **ShadowPtr <<
"\n");
1627 if (MS.TrackOrigins && !Overflow) {
1629 getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
1630 setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
1632 setOrigin(A, getCleanOrigin());
1635 ArgOffset +=
alignTo(Size, kShadowTLSAlignment);
1637 assert(*ShadowPtr &&
"Could not find shadow for an argument");
1641 return getCleanShadow(V);
1651 if (!MS.TrackOrigins)
return nullptr;
1652 if (!PropagateShadow)
return getCleanOrigin();
1653 if (isa<Constant>(V))
return getCleanOrigin();
1654 assert((isa<Instruction>(V) || isa<Argument>(V)) &&
1655 "Unexpected value type in getOrigin()");
1658 return getCleanOrigin();
1660 Value *Origin = OriginMap[V];
1661 assert(Origin &&
"Missing origin");
1676 if (!InsertChecks)
return;
1679 assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
1680 "Can only insert checks for integer and vector shadow types");
1683 ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
1692 Value *Shadow, *Origin;
1694 Shadow = getShadow(Val);
1695 if (!Shadow)
return;
1696 Origin = getOrigin(Val);
1698 Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
1699 if (!Shadow)
return;
1700 Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
1702 insertShadowCheck(Shadow, Origin, OrigIns);
1754 Type *ShadowTy = getShadowTy(&I);
1756 Value *ShadowPtr, *OriginPtr;
1758 if (PropagateShadow) {
1759 std::tie(ShadowPtr, OriginPtr) =
1760 getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment,
false);
1763 setShadow(&I, getCleanShadow(&I));
1772 if (MS.TrackOrigins) {
1773 if (PropagateShadow) {
1774 unsigned OriginAlignment =
std::max(kMinOriginAlignment, Alignment);
1777 setOrigin(&I, getCleanOrigin());
1793 assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));
1797 Value *ShadowPtr = getShadowOriginPtr(Addr, IRB, I.
getType(),
1802 insertShadowCheck(Addr, &I);
1807 if (isa<AtomicCmpXchgInst>(I))
1812 setShadow(&I, getCleanShadow(&I));
1813 setOrigin(&I, getCleanOrigin());
1830 setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.
getOperand(1),
1832 setOrigin(&I, getOrigin(&I, 0));
1838 setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
1840 setOriginForNaryOp(I);
1846 setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
1848 setOriginForNaryOp(I);
1855 setOrigin(&I, getOrigin(&I, 0));
1861 setOrigin(&I, getOrigin(&I, 0));
1867 setOrigin(&I, getOrigin(&I, 0));
1874 if (
auto *CI = dyn_cast<CallInst>(I.
getOperand(0)))
1875 if (CI->isMustTailCall())
1878 setShadow(&I, IRB.
CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
1879 setOrigin(&I, getOrigin(&I, 0));
1884 setShadow(&I, IRB.
CreateIntCast(getShadow(&I, 0), getShadowTy(&I),
false,
1885 "_msprop_ptrtoint"));
1886 setOrigin(&I, getOrigin(&I, 0));
1891 setShadow(&I, IRB.
CreateIntCast(getShadow(&I, 0), getShadowTy(&I),
false,
1892 "_msprop_inttoptr"));
1893 setOrigin(&I, getOrigin(&I, 0));
1896 void visitFPToSIInst(
CastInst& I) { handleShadowOr(I); }
1897 void visitFPToUIInst(
CastInst& I) { handleShadowOr(I); }
1898 void visitSIToFPInst(
CastInst& I) { handleShadowOr(I); }
1899 void visitUIToFPInst(
CastInst& I) { handleShadowOr(I); }
1900 void visitFPExtInst(
CastInst& I) { handleShadowOr(I); }
1901 void visitFPTruncInst(
CastInst& I) { handleShadowOr(I); }
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    setOriginForNaryOp(I);

    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    setOriginForNaryOp(I);
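// A standalone sketch (not part of the pass) of the bitwise shadow rule used
// by the And/Or visitors above, shown on single bytes. A result bit of A & B
// is defined when both inputs are defined there or when either input holds a
// defined 0, giving S = (S1 & S2) | (V1 & S2) | (S1 & V2); the Or case is the
// mirror image with defined 1s (the values are negated first).
#include <cassert>
#include <cstdint>

struct ShadowedByte { uint8_t V; uint8_t S; };  // S bit set == bit is poisoned

static ShadowedByte msanAnd(ShadowedByte A, ShadowedByte B) {
  uint8_t S = (A.S & B.S) | (A.V & B.S) | (A.S & B.V);
  return {uint8_t(A.V & B.V), S};
}

static ShadowedByte msanOr(ShadowedByte A, ShadowedByte B) {
  uint8_t S = (A.S & B.S) | (uint8_t(~A.V) & B.S) | (A.S & uint8_t(~B.V));
  return {uint8_t(A.V | B.V), S};
}

int main() {
  ShadowedByte Poisoned{0xAA, 0xFF};           // every bit unknown
  ShadowedByte Zero{0x00, 0x00};               // defined zero
  ShadowedByte Ones{0xFF, 0x00};               // defined all-ones
  assert(msanAnd(Poisoned, Zero).S == 0x00);   // x & 0 == 0, fully defined
  assert(msanAnd(Poisoned, Ones).S == 0xFF);   // x & 1 == x, still unknown
  assert(msanOr(Poisoned, Ones).S == 0x00);    // x | 1 == 1, fully defined
  assert(msanOr(Poisoned, Zero).S == 0xFF);    // x | 0 == x, still unknown
}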
1967 template <
bool CombineShadow>
1969 Value *Shadow =
nullptr;
1970 Value *Origin =
nullptr;
1972 MemorySanitizerVisitor *MSV;
1976 : IRB(IRB), MSV(MSV) {}
1980 if (CombineShadow) {
1985 OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->
getType());
1986 Shadow = IRB.
CreateOr(Shadow, OpShadow,
"_msprop");
1990 if (MSV->MS.TrackOrigins) {
1997 if (!ConstOrigin || !ConstOrigin->
isNullValue()) {
1998 Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
2000 IRB.
CreateICmpNE(FlatShadow, MSV->getCleanShadow(FlatShadow));
2010 Value *OpShadow = MSV->getShadow(V);
2011 Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) :
nullptr;
2012 return Add(OpShadow, OpOrigin);
2018 if (CombineShadow) {
2020 Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
2021 MSV->setShadow(I, Shadow);
2023 if (MSV->MS.TrackOrigins) {
2025 MSV->setOrigin(I, Origin);
2035 if (!MS.TrackOrigins)
return;
2037 OriginCombiner
OC(
this, IRB);
2043 size_t VectorOrPrimitiveTypeSizeInBits(
Type *Ty) {
2045 "Vector of pointers is not a valid shadow type");
2056 size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
2057 size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
2058 if (srcSizeInBits > 1 && dstSizeInBits == 1)
2075 Type *ShadowTy = getShadowTy(V);
2087 ShadowAndOriginCombiner
SC(
this, IRB);
    for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
      const APInt &V = Elt->getValue();

    if (ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
      const APInt &V = Elt->getValue();

    setShadow(&I,
              IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
    setOrigin(&I, getOrigin(OtherArg));
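// A standalone sketch of why multiplying by a constant reshapes the shadow.
// If the constant C has k trailing zero bits, x * C always has k known-zero
// low bits, so those result bits are defined no matter how poisoned x is; the
// handler above captures this by multiplying the operand's shadow by a
// ShadowMul derived from that trailing-zero count.
#include <cassert>
#include <cstdint>

static unsigned countTrailingZeros(uint8_t C) {
  unsigned K = 0;
  while (K < 8 && !((C >> K) & 1))
    ++K;
  return K;
}

static uint8_t mulConstShadow(uint8_t Shadow, uint8_t C) {
  return uint8_t(Shadow * uint8_t(1u << countTrailingZeros(C)));
}

int main() {
  uint8_t S = 0xFF;                     // operand is completely uninitialized
  assert(mulConstShadow(S, 8) == 0xF8); // *8: the low 3 result bits are defined
  assert(mulConstShadow(S, 1) == 0xFF); // *1: nothing becomes defined
}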
    if (constOp0 && !constOp1)
      handleMulByConstant(I, constOp0, I.getOperand(1));
    else if (constOp1 && !constOp0)
      handleMulByConstant(I, constOp1, I.getOperand(0));
2159 setShadow(&I, getShadow(&I, 0));
2160 setOrigin(&I, getOrigin(&I, 0));
  void handleEqualityComparison(ICmpInst &I) {
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);
    setOriginForNaryOp(I);
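// A standalone sketch of the == / != shadow rule on partially poisoned bytes.
// The boolean result is poisoned only when no *defined* bit pair already
// differs (if one does, the answer is "not equal" whatever the unknown bits
// are) and at least one compared bit is poisoned.
#include <cassert>
#include <cstdint>

// True = the result of (A == B) is itself poisoned.
static bool eqResultPoisoned(uint8_t A, uint8_t Sa, uint8_t B, uint8_t Sb) {
  uint8_t Sc = Sa | Sb;               // bits where either side is poisoned
  uint8_t Xor = A ^ B;                // bits that differ in the stored values
  bool definedBitDiffers = (Xor & uint8_t(~Sc)) != 0;
  bool anyPoisonedBit = Sc != 0;
  return anyPoisonedBit && !definedBitDiffers;
}

int main() {
  // Low nibble of A is poisoned, but the defined high nibbles already differ:
  assert(!eqResultPoisoned(0x10, 0x0F, 0x20, 0x00)); // a defined "not equal"
  // Same defined bits; the poisoned low nibble could still flip the answer:
  assert(eqResultPoisoned(0x10, 0x0F, 0x10, 0x00));  // result is poisoned
}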
  void handleRelationalComparisonExact(ICmpInst &I) {
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

        getLowestPossibleValue(IRB, A, Sa, IsSigned),
        getHighestPossibleValue(IRB, B, Sb, IsSigned));
        getHighestPossibleValue(IRB, A, Sa, IsSigned),
        getLowestPossibleValue(IRB, B, Sb, IsSigned));
    setOriginForNaryOp(I);
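// A standalone sketch of the "exact" relational handling above, for an
// unsigned A < B. Treating each poisoned bit as free to be 0 or 1 gives a
// [lowest, highest] interval per operand; the result is defined exactly when
// the two extreme comparisons agree.
#include <cassert>
#include <cstdint>

static uint8_t lowest(uint8_t V, uint8_t S) { return V & uint8_t(~S); }
static uint8_t highest(uint8_t V, uint8_t S) { return V | S; }

// True = the result of (A < B) is poisoned.
static bool ultResultPoisoned(uint8_t A, uint8_t Sa, uint8_t B, uint8_t Sb) {
  bool BestCase = lowest(A, Sa) < highest(B, Sb);   // most favorable to "<"
  bool WorstCase = highest(A, Sa) < lowest(B, Sb);  // least favorable to "<"
  return BestCase != WorstCase; // disagreement: the answer depends on unknowns
}

int main() {
  // A in [0x00, 0x0F] (low nibble unknown), B a defined 0x80: always "<".
  assert(!ultResultPoisoned(0x00, 0x0F, 0x80, 0x00));
  // A in [0x00, 0x0F], B a defined 0x08: the answer depends on the unknowns.
  assert(ultResultPoisoned(0x00, 0x0F, 0x08, 0x00));
}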
  void handleSignedRelationalComparison(ICmpInst &I) {
    if ((constOp = dyn_cast<Constant>(I.getOperand(1)))) {
    } else if ((constOp = dyn_cast<Constant>(I.getOperand(0)))) {
    setShadow(&I, Shadow);
    setOrigin(&I, getOrigin(op));

      handleEqualityComparison(I);
      handleRelationalComparisonExact(I);
      handleSignedRelationalComparison(I);
      handleRelationalComparisonExact(I);
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
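// A standalone sketch of the shift rule above, on bytes. The first operand's
// shadow is shifted by the same (concrete) amount; if the shift amount itself
// is at all poisoned, the whole result becomes poisoned.
#include <cassert>
#include <cstdint>

static uint8_t shlShadow(uint8_t Sa, uint8_t B, uint8_t Sb) {
  uint8_t Shifted = uint8_t(Sa << B);                 // shadow follows the data
  uint8_t SbConv = Sb ? uint8_t(0xFF) : uint8_t(0);   // unknown count: all poisoned
  return Shifted | SbConv;
}

int main() {
  assert(shlShadow(0x01, 4, 0x00) == 0x10); // poisoned bit 0 moves to bit 4
  assert(shlShadow(0x00, 4, 0x01) == 0xFF); // unknown shift amount: all poisoned
}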
2376 {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
2377 IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
2378 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2390 {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
2391 IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
2392 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2401 {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
2402 IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
2403 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2408 VAHelper->visitVAStartInst(I);
2412 VAHelper->visitVACopyInst(I);
2422 Value *Shadow = getShadow(&I, 1);
2423 Value *ShadowPtr, *OriginPtr;
2427 std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
2428 Addr, IRB, Shadow->
getType(), 1,
true);
2432 insertShadowCheck(Addr, &I);
2435 if (MS.TrackOrigins) IRB.
CreateStore(getOrigin(&I, 1), OriginPtr);
2447 Type *ShadowTy = getShadowTy(&I);
2448 Value *ShadowPtr, *OriginPtr;
2449 if (PropagateShadow) {
2452 unsigned Alignment = 1;
2453 std::tie(ShadowPtr, OriginPtr) =
2454 getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment,
false);
2457 setShadow(&I, getCleanShadow(&I));
2461 insertShadowCheck(Addr, &I);
2463 if (MS.TrackOrigins) {
2464 if (PropagateShadow)
2467 setOrigin(&I, getCleanOrigin());
2487 for (
unsigned i = 0; i < NumArgOperands; ++i) {
2494 ShadowAndOriginCombiner
SC(
this, IRB);
2495 for (
unsigned i = 0; i < NumArgOperands; ++i)
2514 if (NumArgOperands == 0)
2517 if (NumArgOperands == 2 &&
2523 return handleVectorStoreIntrinsic(I);
2526 if (NumArgOperands == 1 &&
2531 return handleVectorLoadIntrinsic(I);
2535 if (maybeHandleSimpleNomemIntrinsic(I))
2548 setShadow(&I, IRB.
CreateCall(BswapFunc, getShadow(Op)));
2549 setOrigin(&I, getOrigin(Op));
2567 void handleVectorConvertIntrinsic(
IntrinsicInst &I,
int NumUsedElements) {
2569 Value *CopyOp, *ConvertOp;
2594 Value *ConvertShadow = getShadow(ConvertOp);
2595 Value *AggShadow =
nullptr;
2599 for (
int i = 1; i < NumUsedElements; ++i) {
2602 AggShadow = IRB.
CreateOr(AggShadow, MoreShadow);
2605 AggShadow = ConvertShadow;
2608 insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);
2615 Value *ResultShadow = getShadow(CopyOp);
2617 for (
int i = 0; i < NumUsedElements; ++i) {
2622 setShadow(&I, ResultShadow);
2623 setOrigin(&I, getOrigin(CopyOp));
2625 setShadow(&I, getCleanShadow(&I));
2626 setOrigin(&I, getCleanOrigin());
2634 S = CreateShadowCast(IRB, S, IRB.
getInt64Ty(),
true);
2637 return CreateShadowCast(IRB, S2, T,
true);
2645 return CreateShadowCast(IRB, S2, T,
true);
2662 void handleVectorShiftIntrinsic(
IntrinsicInst &I,
bool Variable) {
2667 Value *S1 = getShadow(&I, 0);
2668 Value *S2 = getShadow(&I, 1);
2669 Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
2670 : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
2674 {IRB.CreateBitCast(S1, V1->
getType()), V2});
2675 Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
2676 setShadow(&I, IRB.CreateOr(Shift, S2Conv));
2677 setOriginForNaryOp(I);
2681 Type *getMMXVectorTy(
unsigned EltSizeInBits) {
2682 const unsigned X86_MMXSizeInBits = 64;
2684 X86_MMXSizeInBits / EltSizeInBits);
2725 void handleVectorPackIntrinsic(
IntrinsicInst &I,
unsigned EltSizeInBits = 0) {
2729 Value *S1 = getShadow(&I, 0);
2730 Value *S2 = getShadow(&I, 1);
2736 Type *T = isX86_MMX ? getMMXVectorTy(EltSizeInBits) : S1->
getType();
2755 IRB.
CreateCall(ShadowFn, {S1_ext, S2_ext},
"_msprop_vector_pack");
2758 setOriginForNaryOp(I);
2763 const unsigned SignificantBitsPerResultElement = 16;
2766 unsigned ZeroBitsPerResultElement =
2770 Value *S = IRB.
CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2774 S = IRB.
CreateLShr(S, ZeroBitsPerResultElement);
2777 setOriginForNaryOp(I);
2782 unsigned EltSizeInBits = 0) {
2784 Type *ResTy = isX86_MMX ? getMMXVectorTy(EltSizeInBits * 2) : I.
getType();
2786 Value *S = IRB.
CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2792 setOriginForNaryOp(I);
2800 Type *ResTy = getShadowTy(&I);
2801 Value *S0 = IRB.
CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2805 setOriginForNaryOp(I);
2813 Value *S0 = IRB.
CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2814 Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&I));
2816 setOriginForNaryOp(I);
2824 getShadowOriginPtr(Addr, IRB, Ty, 1,
true)
2831 insertShadowCheck(Addr, &I);
2835 if (!InsertChecks)
return;
2840 unsigned Alignment = 1;
2841 Value *ShadowPtr, *OriginPtr;
2842 std::tie(ShadowPtr, OriginPtr) =
2843 getShadowOriginPtr(Addr, IRB, Ty, Alignment,
false);
2846 insertShadowCheck(Addr, &I);
2850 MS.TrackOrigins ? IRB.
CreateLoad(OriginPtr) : getCleanOrigin();
2851 insertShadowCheck(Shadow, Origin, &I);
2860 Value *Shadow = getShadow(V);
2864 std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
2868 insertShadowCheck(Addr, &I);
2871 insertShadowCheck(Mask, &I);
2876 if (MS.TrackOrigins) {
2878 paintOrigin(IRB, getOrigin(V), OriginPtr,
2879 DL.getTypeStoreSize(Shadow->
getType()),
2880 std::max(Align, kMinOriginAlignment));
2891 Type *ShadowTy = getShadowTy(&I);
2892 Value *ShadowPtr, *OriginPtr;
2893 if (PropagateShadow) {
2894 std::tie(ShadowPtr, OriginPtr) =
2895 getShadowOriginPtr(Addr, IRB, ShadowTy, Align,
false);
2897 getShadow(PassThru),
"_msmaskedld"));
2899 setShadow(&I, getCleanShadow(&I));
2903 insertShadowCheck(Addr, &I);
2904 insertShadowCheck(Mask, &I);
2907 if (MS.TrackOrigins) {
2908 if (PropagateShadow) {
2924 getOrigin(PassThru), IRB.
CreateLoad(OriginPtr));
2926 setOrigin(&I, Origin);
2928 setOrigin(&I, getCleanOrigin());
2941 handleMaskedStore(I);
2944 handleMaskedLoad(I);
2972 handleVectorConvertIntrinsic(I, 1);
2976 handleVectorConvertIntrinsic(I, 2);
3049 handleVectorShiftIntrinsic(I,
false);
3069 handleVectorShiftIntrinsic(I,
true);
3080 handleVectorPackIntrinsic(I);
3085 handleVectorPackIntrinsic(I, 16);
3089 handleVectorPackIntrinsic(I, 32);
3095 handleVectorSadIntrinsic(I);
3102 handleVectorPmaddIntrinsic(I);
3106 handleVectorPmaddIntrinsic(I, 8);
3110 handleVectorPmaddIntrinsic(I, 16);
3139 handleVectorCompareScalarIntrinsic(I);
3147 handleVectorComparePackedIntrinsic(I);
3152 setShadow(&I, getCleanShadow(&I));
3153 setOrigin(&I, getCleanOrigin());
3157 if (!handleUnknownIntrinsic(I))
3158 visitInstruction(I);
3175 visitAsmInstruction(I);
3177 visitInstruction(I);
3181 assert(!isa<IntrinsicInst>(&I) &&
"intrinsics are handled elsewhere");
3199 unsigned ArgOffset = 0;
3202 ArgIt != End; ++ArgIt) {
3206 LLVM_DEBUG(
dbgs() <<
"Arg " << i <<
" is not sized: " << I <<
"\n");
3214 Value *ArgShadow = getShadow(A);
3215 Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
3217 <<
" Shadow: " << *ArgShadow <<
"\n");
3218 bool ArgIsInitialized =
false;
3222 "ByVal argument is not a pointer!");
3224 if (ArgOffset + Size > kParamTLSSize)
break;
3226 unsigned Alignment = std::min(ParamAlignment, kShadowTLSAlignment);
3228 getShadowOriginPtr(A, IRB, IRB.
getInt8Ty(), Alignment,
3232 Store = IRB.
CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
3237 if (ArgOffset + Size > kParamTLSSize)
break;
3239 kShadowTLSAlignment);
3241 if (Cst && Cst->
isNullValue()) ArgIsInitialized =
true;
3243 if (MS.TrackOrigins && !ArgIsInitialized)
3245 getOriginPtrForArgument(A, IRB, ArgOffset));
3247 assert(Size != 0 && Store !=
nullptr);
3249 ArgOffset +=
alignTo(Size, 8);
3255 VAHelper->visitCallSite(CS, IRB);
3261 if (CS.
isCall() && cast<CallInst>(&
I)->isMustTailCall())
return;
3264 Value *
Base = getShadowPtrForRetval(&I, IRBBefore);
3271 BasicBlock *NormalDest = cast<InvokeInst>(&
I)->getNormalDest();
3276 setShadow(&I, getCleanShadow(&I));
3277 setOrigin(&I, getCleanOrigin());
3284 "Could not find insertion point for retval shadow load");
3287 Value *RetvalShadow =
3288 IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter),
3289 kShadowTLSAlignment,
"_msret");
3290 setShadow(&I, RetvalShadow);
3291 if (MS.TrackOrigins)
3292 setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter)));
3295 bool isAMustTailRetVal(
Value *RetVal) {
3296 if (
auto *I = dyn_cast<BitCastInst>(RetVal)) {
3299 if (
auto *I = dyn_cast<CallInst>(RetVal)) {
3308 if (!RetVal)
return;
3310 if (isAMustTailRetVal(RetVal))
return;
3311 Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
3312 if (CheckReturnValue) {
3313 insertShadowCheck(RetVal, &I);
3314 Value *Shadow = getCleanShadow(RetVal);
3317 Value *Shadow = getShadow(RetVal);
3319 if (MS.TrackOrigins)
3320 IRB.
CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
3324 void visitPHINode(
PHINode &I) {
3326 if (!PropagateShadow) {
3327 setShadow(&I, getCleanShadow(&I));
3328 setOrigin(&I, getCleanOrigin());
3335 if (MS.TrackOrigins)
3350 StackDescription.
str());
3356 {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len});
3358 Value *ShadowBase, *OriginBase;
3359 std::tie(ShadowBase, OriginBase) =
3360 getShadowOriginPtr(&I, IRB, IRB.
getInt8Ty(), 1,
true);
3366 if (PoisonStack && MS.TrackOrigins) {
3367 Value *Descr = getLocalVarDescription(I);
3369 {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len,
3370 IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()),
3371 IRB.CreatePointerCast(&F, MS.IntptrTy)});
3376 Value *Descr = getLocalVarDescription(I);
3379 {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len,
3380 IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy())});
3383 {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len});
3388 setShadow(&I, getCleanShadow(&I));
3389 setOrigin(&I, getCleanOrigin());
3397 if (MS.CompileKernel)
3398 instrumentAllocaKmsan(I, IRB, Len);
3400 instrumentAllocaUserspace(I, IRB, Len);
3409 Value *Sb = getShadow(B);
3410 Value *Sc = getShadow(C);
3411 Value *Sd = getShadow(D);
3420 Sa1 = getPoisonedShadow(getShadowTy(I.
getType()));
3428 C = CreateAppToShadowCast(IRB, C);
3429 D = CreateAppToShadowCast(IRB, D);
3436 if (MS.TrackOrigins) {
3458 setShadow(&I, getCleanShadow(&I));
3459 setOrigin(&I, getCleanOrigin());
3463 setShadow(&I, getCleanShadow(&I));
3464 setOrigin(&I, getCleanOrigin());
3468 setShadow(&I, getCleanShadow(&I));
3469 setOrigin(&I, getCleanOrigin());
3480 Value *AggShadow = getShadow(Agg);
3484 setShadow(&I, ResShadow);
3485 setOriginForNaryOp(I);
3498 setOriginForNaryOp(I);
3502 if (
CallInst *CI = dyn_cast<CallInst>(&I)) {
3503 errs() <<
"ZZZ call " << CI->getCalledFunction()->getName() <<
"\n";
3507 errs() <<
"QQQ " << I <<
"\n";
3533 insertShadowCheck(Operand, &I);
3544 IRB.
CreateCall(MS.MsanInstrumentAsmStoreFn, {Ptr, SizeVal});
3549 int NumRetOutputs = 0;
3561 for (
size_t i = 0, n = Constraints.size(); i < n; i++) {
3563 switch (Info.
Type) {
3571 return NumOutputs - NumRetOutputs;
3594 int OutputArgs = getNumOutputArgs(IA, CI);
3600 for (
int i = OutputArgs; i < NumOperands; i++) {
3602 instrumentAsmArgument(Operand, I, IRB, DL,
false);
3607 for (
int i = 0; i < OutputArgs; i++) {
3609 instrumentAsmArgument(Operand, I, IRB, DL,
true);
3612 setShadow(&I, getCleanShadow(&I));
3613 setOrigin(&I, getCleanOrigin());
3624 insertShadowCheck(Operand, &I);
3626 setShadow(&I, getCleanShadow(&I));
3627 setOrigin(&I, getCleanOrigin());
struct VarArgAMD64Helper : public VarArgHelper {
  static const unsigned AMD64GpEndOffset = 48;
  static const unsigned AMD64FpEndOffsetSSE = 176;
  static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;

  unsigned AMD64FpEndOffset;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  Value *VAArgTLSCopy = nullptr;
  Value *VAArgTLSOriginCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
3652 VarArgAMD64Helper(
Function &F, MemorySanitizer &MS,
3653 MemorySanitizerVisitor &MSV)
3654 :
F(F), MS(MS), MSV(MSV) {
3655 AMD64FpEndOffset = AMD64FpEndOffsetSSE;
3657 if (Attr.isStringAttribute() &&
3658 (Attr.getKindAsString() ==
"target-features")) {
3659 if (Attr.getValueAsString().contains(
"-sse"))
3660 AMD64FpEndOffset = AMD64FpEndOffsetNoSSE;
3666 ArgKind classifyArgument(
Value* arg) {
3670 return AK_FloatingPoint;
3672 return AK_GeneralPurpose;
3674 return AK_GeneralPurpose;
    unsigned GpOffset = 0;
    unsigned FpOffset = AMD64GpEndOffset;
    unsigned OverflowOffset = AMD64FpEndOffset;
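// A quick standalone check of the System V AMD64 register save area sizes that
// GpOffset/FpOffset/OverflowOffset walk through: 6 integer argument registers
// of 8 bytes, then 8 SSE registers of 16 bytes, then the stack overflow area.
#include <cassert>

int main() {
  assert(6 * 8 == 48);         // AMD64GpEndOffset
  assert(48 + 8 * 16 == 176);  // AMD64FpEndOffsetSSE
}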
3692 ArgIt != End; ++ArgIt) {
3706 Value *ShadowBase = getShadowPtrForVAArgument(
3707 RealTy, IRB, OverflowOffset,
alignTo(ArgSize, 8));
3708 Value *OriginBase =
nullptr;
3709 if (MS.TrackOrigins)
3710 OriginBase = getOriginPtrForVAArgument(RealTy, IRB, OverflowOffset);
3711 OverflowOffset +=
alignTo(ArgSize, 8);
3714 Value *ShadowPtr, *OriginPtr;
3715 std::tie(ShadowPtr, OriginPtr) =
3719 IRB.
CreateMemCpy(ShadowBase, kShadowTLSAlignment, ShadowPtr,
3720 kShadowTLSAlignment, ArgSize);
3721 if (MS.TrackOrigins)
3722 IRB.
CreateMemCpy(OriginBase, kShadowTLSAlignment, OriginPtr,
3723 kShadowTLSAlignment, ArgSize);
3725 ArgKind AK = classifyArgument(A);
3726 if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
3728 if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
3730 Value *ShadowBase, *OriginBase =
nullptr;
3732 case AK_GeneralPurpose:
3734 getShadowPtrForVAArgument(A->
getType(), IRB, GpOffset, 8);
3735 if (MS.TrackOrigins)
3737 getOriginPtrForVAArgument(A->
getType(), IRB, GpOffset);
3740 case AK_FloatingPoint:
3742 getShadowPtrForVAArgument(A->
getType(), IRB, FpOffset, 16);
3743 if (MS.TrackOrigins)
3745 getOriginPtrForVAArgument(A->
getType(), IRB, FpOffset);
3753 getShadowPtrForVAArgument(A->
getType(), IRB, OverflowOffset, 8);
3754 if (MS.TrackOrigins)
3756 getOriginPtrForVAArgument(A->
getType(), IRB, OverflowOffset);
3757 OverflowOffset +=
alignTo(ArgSize, 8);
3766 Value *Shadow = MSV.getShadow(A);
3768 if (MS.TrackOrigins) {
3769 Value *Origin = MSV.getOrigin(A);
3771 MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
3772 std::max(kShadowTLSAlignment, kMinOriginAlignment));
3778 IRB.
CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
3783 unsigned ArgOffset,
unsigned ArgSize) {
3785 if (ArgOffset + ArgSize > kParamTLSSize)
3807 Value *ShadowPtr, *OriginPtr;
3808 unsigned Alignment = 8;
3809 std::tie(ShadowPtr, OriginPtr) =
3810 MSV.getShadowOriginPtr(VAListTag, IRB, IRB.
getInt8Ty(), Alignment,
3816 24, Alignment,
false);
3824 VAStartInstrumentationList.
push_back(&I);
3825 unpoisonVAListTagForInst(I);
3828 void visitVACopyInst(
VACopyInst &I)
override {
3830 unpoisonVAListTagForInst(I);
3833 void finalizeInstrumentation()
override {
3834 assert(!VAArgOverflowSize && !VAArgTLSCopy &&
3835 "finalizeInstrumentation called twice");
3836 if (!VAStartInstrumentationList.
empty()) {
3839 IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
3840 VAArgOverflowSize = IRB.
CreateLoad(MS.VAArgOverflowSizeTLS);
3845 IRB.
CreateMemCpy(VAArgTLSCopy, 8, MS.VAArgTLS, 8, CopySize);
3846 if (MS.TrackOrigins) {
3848 IRB.
CreateMemCpy(VAArgTLSOriginCopy, 8, MS.VAArgOriginTLS, 8, CopySize);
3854 for (
size_t i = 0, n = VAStartInstrumentationList.
size(); i < n; i++) {
3855 CallInst *OrigInst = VAStartInstrumentationList[i];
3864 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
3865 unsigned Alignment = 16;
3866 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
3867 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.
getInt8Ty(),
3869 IRB.
CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
3871 if (MS.TrackOrigins)
3872 IRB.
CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
3873 Alignment, AMD64FpEndOffset);
3879 Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
3880 std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
3881 MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.
getInt8Ty(),
3885 IRB.
CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
3887 if (MS.TrackOrigins) {
3890 IRB.
CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
3898 struct VarArgMIPS64Helper :
public VarArgHelper {
3900 MemorySanitizer &MS;
3901 MemorySanitizerVisitor &MSV;
3902 Value *VAArgTLSCopy =
nullptr;
3903 Value *VAArgSize =
nullptr;
3907 VarArgMIPS64Helper(
Function &F, MemorySanitizer &MS,
3908 MemorySanitizerVisitor &MSV) :
F(F), MS(MS), MSV(MSV) {}
3911 unsigned VAArgOffset = 0;
3915 ArgIt != End; ++ArgIt) {
3924 VAArgOffset += (8 - ArgSize);
3926 Base = getShadowPtrForVAArgument(A->getType(), IRB, VAArgOffset, ArgSize);
3927 VAArgOffset += ArgSize;
3928 VAArgOffset =
alignTo(VAArgOffset, 8);
3937 IRB.
CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
3942 unsigned ArgOffset,
unsigned ArgSize) {
3944 if (ArgOffset + ArgSize > kParamTLSSize)
3954 VAStartInstrumentationList.
push_back(&I);
3956 Value *ShadowPtr, *OriginPtr;
3957 unsigned Alignment = 8;
3958 std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
3959 VAListTag, IRB, IRB.
getInt8Ty(), Alignment,
true);
3961 8, Alignment,
false);
3964 void visitVACopyInst(
VACopyInst &I)
override {
3966 VAStartInstrumentationList.
push_back(&I);
3968 Value *ShadowPtr, *OriginPtr;
3969 unsigned Alignment = 8;
3970 std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
3971 VAListTag, IRB, IRB.
getInt8Ty(), Alignment,
true);
3973 8, Alignment,
false);
3976 void finalizeInstrumentation()
override {
3977 assert(!VAArgSize && !VAArgTLSCopy &&
3978 "finalizeInstrumentation called twice");
3979 IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
3980 VAArgSize = IRB.
CreateLoad(MS.VAArgOverflowSizeTLS);
3984 if (!VAStartInstrumentationList.
empty()) {
3988 IRB.
CreateMemCpy(VAArgTLSCopy, 8, MS.VAArgTLS, 8, CopySize);
3993 for (
size_t i = 0, n = VAStartInstrumentationList.
size(); i < n; i++) {
3994 CallInst *OrigInst = VAStartInstrumentationList[i];
3997 Value *RegSaveAreaPtrPtr =
4001 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
4002 unsigned Alignment = 8;
4003 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
4004 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.
getInt8Ty(),
4006 IRB.
CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
struct VarArgAArch64Helper : public VarArgHelper {
  static const unsigned kAArch64GrArgSize = 64;
  static const unsigned kAArch64VrArgSize = 128;

  static const unsigned AArch64GrBegOffset = 0;
  static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
  static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
  static const unsigned AArch64VrEndOffset =
      AArch64VrBegOffset + kAArch64VrArgSize;
  static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;
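// A quick standalone check of the AArch64 (AAPCS64) va_arg layout these
// constants encode: 8 general-purpose registers of 8 bytes and 8 SIMD/FP
// registers of 16 bytes saved back to back, with stack arguments following.
#include <cassert>

int main() {
  assert(8 * 8 == 64);      // kAArch64GrArgSize
  assert(8 * 16 == 128);    // kAArch64VrArgSize
  assert(64 + 128 == 192);  // AArch64VAEndOffset: end of the register save area
}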
4026 MemorySanitizer &MS;
4027 MemorySanitizerVisitor &MSV;
4028 Value *VAArgTLSCopy =
nullptr;
4029 Value *VAArgOverflowSize =
nullptr;
4033 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
4035 VarArgAArch64Helper(
Function &F, MemorySanitizer &MS,
4036 MemorySanitizerVisitor &MSV) :
F(F), MS(MS), MSV(MSV) {}
4038 ArgKind classifyArgument(
Value* arg) {
4041 return AK_FloatingPoint;
4044 return AK_GeneralPurpose;
4058 unsigned GrOffset = AArch64GrBegOffset;
4059 unsigned VrOffset = AArch64VrBegOffset;
4060 unsigned OverflowOffset = AArch64VAEndOffset;
4064 ArgIt != End; ++ArgIt) {
4068 ArgKind AK = classifyArgument(A);
4069 if (AK == AK_GeneralPurpose && GrOffset >= AArch64GrEndOffset)
4071 if (AK == AK_FloatingPoint && VrOffset >= AArch64VrEndOffset)
4075 case AK_GeneralPurpose:
4076 Base = getShadowPtrForVAArgument(A->
getType(), IRB, GrOffset, 8);
4079 case AK_FloatingPoint:
4080 Base = getShadowPtrForVAArgument(A->
getType(), IRB, VrOffset, 8);
4089 Base = getShadowPtrForVAArgument(A->
getType(), IRB, OverflowOffset,
4091 OverflowOffset +=
alignTo(ArgSize, 8);
4104 IRB.
CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
4109 unsigned ArgOffset,
unsigned ArgSize) {
4111 if (ArgOffset + ArgSize > kParamTLSSize)
4121 VAStartInstrumentationList.
push_back(&I);
4123 Value *ShadowPtr, *OriginPtr;
4124 unsigned Alignment = 8;
4125 std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
4126 VAListTag, IRB, IRB.
getInt8Ty(), Alignment,
true);
4128 32, Alignment,
false);
4131 void visitVACopyInst(
VACopyInst &I)
override {
4133 VAStartInstrumentationList.
push_back(&I);
4135 Value *ShadowPtr, *OriginPtr;
4136 unsigned Alignment = 8;
4137 std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
4138 VAListTag, IRB, IRB.
getInt8Ty(), Alignment,
true);
4140 32, Alignment,
false);
4145 Value *SaveAreaPtrPtr =
4155 Value *SaveAreaPtr =
4161 return IRB.
CreateSExt(SaveArea32, MS.IntptrTy);
4164 void finalizeInstrumentation()
override {
4165 assert(!VAArgOverflowSize && !VAArgTLSCopy &&
4166 "finalizeInstrumentation called twice");
4167 if (!VAStartInstrumentationList.
empty()) {
4170 IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
4171 VAArgOverflowSize = IRB.
CreateLoad(MS.VAArgOverflowSizeTLS);
4176 IRB.
CreateMemCpy(VAArgTLSCopy, 8, MS.VAArgTLS, 8, CopySize);
4184 for (
size_t i = 0, n = VAStartInstrumentationList.
size(); i < n; i++) {
4185 CallInst *OrigInst = VAStartInstrumentationList[i];
4204 Value *StackSaveAreaPtr = getVAField64(IRB, VAListTag, 0);
4207 Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
4208 Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
4210 Value *GrRegSaveAreaPtr = IRB.
CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea);
4213 Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
4214 Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
4216 Value *VrRegSaveAreaPtr = IRB.
CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea);
4222 Value *GrRegSaveAreaShadowPtrOff =
4223 IRB.
CreateAdd(GrArgSize, GrOffSaveArea);
4225 Value *GrRegSaveAreaShadowPtr =
4226 MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.
getInt8Ty(),
4231 GrRegSaveAreaShadowPtrOff);
4232 Value *GrCopySize = IRB.
CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
4234 IRB.
CreateMemCpy(GrRegSaveAreaShadowPtr, 8, GrSrcPtr, 8, GrCopySize);
4237 Value *VrRegSaveAreaShadowPtrOff =
4238 IRB.
CreateAdd(VrArgSize, VrOffSaveArea);
4240 Value *VrRegSaveAreaShadowPtr =
4241 MSV.getShadowOriginPtr(VrRegSaveAreaPtr, IRB, IRB.
getInt8Ty(),
4249 VrRegSaveAreaShadowPtrOff);
4250 Value *VrCopySize = IRB.
CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
4252 IRB.
CreateMemCpy(VrRegSaveAreaShadowPtr, 8, VrSrcPtr, 8, VrCopySize);
4255 Value *StackSaveAreaShadowPtr =
4256 MSV.getShadowOriginPtr(StackSaveAreaPtr, IRB, IRB.
getInt8Ty(),
4260 Value *StackSrcPtr =
4264 IRB.
CreateMemCpy(StackSaveAreaShadowPtr, 16, StackSrcPtr, 16,
4271 struct VarArgPowerPC64Helper :
public VarArgHelper {
4273 MemorySanitizer &MS;
4274 MemorySanitizerVisitor &MSV;
4275 Value *VAArgTLSCopy =
nullptr;
4276 Value *VAArgSize =
nullptr;
4280 VarArgPowerPC64Helper(
Function &F, MemorySanitizer &MS,
4281 MemorySanitizerVisitor &MSV) :
F(F), MS(MS), MSV(MSV) {}
4300 unsigned VAArgOffset = VAArgBase;
4303 ArgIt != End; ++ArgIt) {
4315 VAArgOffset =
alignTo(VAArgOffset, ArgAlign);
4317 Value *
Base = getShadowPtrForVAArgument(
4318 RealTy, IRB, VAArgOffset - VAArgBase, ArgSize);
4320 Value *AShadowPtr, *AOriginPtr;
4321 std::tie(AShadowPtr, AOriginPtr) =
4322 MSV.getShadowOriginPtr(A, IRB, IRB.
getInt8Ty(),
4325 IRB.
CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
4326 kShadowTLSAlignment, ArgSize);
4329 VAArgOffset +=
alignTo(ArgSize, 8);
4333 uint64_t ArgAlign = 8;
4346 VAArgOffset =
alignTo(VAArgOffset, ArgAlign);
4351 VAArgOffset += (8 - ArgSize);
4354 Base = getShadowPtrForVAArgument(A->
getType(), IRB,
4355 VAArgOffset - VAArgBase, ArgSize);
4359 VAArgOffset += ArgSize;
4360 VAArgOffset =
alignTo(VAArgOffset, 8);
4363 VAArgBase = VAArgOffset;
4367 VAArgOffset - VAArgBase);
4370 IRB.
CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
4375 unsigned ArgOffset,
unsigned ArgSize) {
4377 if (ArgOffset + ArgSize > kParamTLSSize)
4387 VAStartInstrumentationList.
push_back(&I);
4389 Value *ShadowPtr, *OriginPtr;
4390 unsigned Alignment = 8;
4391 std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
4392 VAListTag, IRB, IRB.
getInt8Ty(), Alignment,
true);
4394 8, Alignment,
false);
4397 void visitVACopyInst(
VACopyInst &I)
override {
4400 Value *ShadowPtr, *OriginPtr;
4401 unsigned Alignment = 8;
4402 std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
4403 VAListTag, IRB, IRB.
getInt8Ty(), Alignment,
true);
4407 8, Alignment,
false);
4410 void finalizeInstrumentation()
override {
4411 assert(!VAArgSize && !VAArgTLSCopy &&
4412 "finalizeInstrumentation called twice");
4413 IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
4414 VAArgSize = IRB.
CreateLoad(MS.VAArgOverflowSizeTLS);
4418 if (!VAStartInstrumentationList.
empty()) {
4422 IRB.
CreateMemCpy(VAArgTLSCopy, 8, MS.VAArgTLS, 8, CopySize);
4427 for (
size_t i = 0, n = VAStartInstrumentationList.
size(); i < n; i++) {
4428 CallInst *OrigInst = VAStartInstrumentationList[i];
4431 Value *RegSaveAreaPtrPtr =
4435 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
4436 unsigned Alignment = 8;
4437 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
4438 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.
getInt8Ty(),
4440 IRB.
CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
4447 struct VarArgNoOpHelper :
public VarArgHelper {
4448 VarArgNoOpHelper(
Function &
F, MemorySanitizer &MS,
4449 MemorySanitizerVisitor &MSV) {}
4455 void visitVACopyInst(
VACopyInst &I)
override {}
4457 void finalizeInstrumentation()
override {}
4463 MemorySanitizerVisitor &Visitor) {
4468 return new VarArgAMD64Helper(Func, Msan, Visitor);
4469 else if (TargetTriple.isMIPS64())
4470 return new VarArgMIPS64Helper(Func, Msan, Visitor);
4472 return new VarArgAArch64Helper(Func, Msan, Visitor);
4475 return new VarArgPowerPC64Helper(Func, Msan, Visitor);
4477 return new VarArgNoOpHelper(Func, Msan, Visitor);
4481 if (!CompileKernel && (&F == MsanCtorFunction))
4483 MemorySanitizerVisitor Visitor(F, *
this, TLI);
4491 return Visitor.runOnFunction();
Value * CreateInBoundsGEP(Value *Ptr, ArrayRef< Value *> IdxList, const Twine &Name="")
Type * getVectorElementType() const
Return a value (possibly void), from a function.
User::op_iterator arg_iterator
The type of iterator to use when looping over actual arguments at this call site. ...
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
A parsed version of the target data layout string in and methods for querying it. ...
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
const std::string & getTargetTriple() const
Get the target triple which is a string describing the target host.
static const MemoryMapParams Linux_PowerPC64_MemoryMapParams
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const Twine &Name="")
static Constant * getString(LLVMContext &Context, StringRef Initializer, bool AddNull=true)
This method constructs a CDS and initializes it with a text string.
bool isAllOnesValue() const
Return true if this is the value that would be returned by getAllOnesValue.
raw_ostream & errs()
This returns a reference to a raw_ostream for standard error.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
This class represents an incoming formal argument to a Function.
static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
bool doesNotAccessMemory(unsigned OpNo) const
Base class for instruction visitors.
Value * getAggregateOperand()
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Atomic ordering constants.
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
This class represents lattice values for constants.
BinaryOps getOpcode() const
unsigned getParamAlignment(unsigned ArgNo) const
Extract the alignment for a call or parameter (0=unknown).
bool isAtomic() const
Return true if this instruction has an AtomicOrdering of unordered or higher.
Constant * getOrInsertFunction(StringRef Name, FunctionType *T, AttributeList AttributeList)
Look up the specified function in the module symbol table.
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
A Module instance is used to store all the information related to an LLVM module. ...
bool isSized(SmallPtrSetImpl< Type *> *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align, const char *Name)
Provided to resolve 'CreateAlignedLoad(Ptr, Align, "...")' correctly, instead of converting the strin...
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this rmw instruction.
static const MemoryMapParams Linux_I386_MemoryMapParams
Same, but only replaced by something equivalent.
an instruction that atomically checks whether a specified value is in a memory location, and, if it is, stores a new value there.
static const MemoryMapParams NetBSD_X86_64_MemoryMapParams
static cl::opt< bool > ClPoisonStackWithCall("msan-poison-stack-with-call", cl::desc("poison uninitialized stack variables with a call"), cl::Hidden, cl::init(false))
This class represents zero extension of integer types.
static const unsigned kRetvalTLSSize
unsigned getNumElements() const
Random access to the elements.
void push_back(const T &Elt)
bool removeUnreachableBlocks(Function &F, LazyValueInfo *LVI=nullptr, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Remove all blocks that can not be reached from the function's entry.
Value * CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name="")
This class represents a function call, abstracting a target machine's calling convention.
static cl::opt< bool > ClHandleAsmConservative("msan-handle-asm-conservative", cl::desc("conservative handling of inline assembly"), cl::Hidden, cl::init(true))
static PointerType * getInt32PtrTy(LLVMContext &C, unsigned AS=0)
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this load instruction.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space...
const Value * getTrueValue() const
The C convention as implemented on Windows/x86-64 and AArch64.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
Like Internal, but omit from symbol table.
static VarArgHelper * CreateVarArgHelper(Function &Func, MemorySanitizer &Msan, MemorySanitizerVisitor &Visitor)
This instruction constructs a fixed permutation of two input vectors.
static cl::opt< bool > ClWithComdat("msan-with-comdat", cl::desc("Place MSan constructors in comdat sections"), cl::Hidden, cl::init(false))
Externally visible function.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
A raw_ostream that writes to an SmallVector or SmallString.
This class wraps the llvm.memset intrinsic.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
static unsigned TypeSizeToSizeIndex(unsigned TypeSize)
This class represents a sign extension of integer types.
uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew=0)
Returns the next integer (mod 2**64) that is greater than or equal to Value and is a multiple of Alig...
CallInst * CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, unsigned Align, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memset to the specified pointer and the specified value.
An instruction for reading from memory.
AttrBuilder & addAttribute(Attribute::AttrKind Val)
Add an attribute to the builder.
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isMustTailCall() const
static Type * getX86_MMXTy(LLVMContext &C)
static cl::opt< unsigned long long > ClXorMask("msan-xor-mask", cl::desc("Define custom MSan XorMask"), cl::Hidden, cl::init(0))
bool isPPC_FP128Ty() const
Return true if this is powerpc long double.
static const MemoryMapParams Linux_AArch64_MemoryMapParams
static cl::opt< bool > ClHandleICmp("msan-handle-icmp", cl::desc("propagate shadow through ICmpEQ and ICmpNE"), cl::Hidden, cl::init(true))
static PointerType * getInt64PtrTy(LLVMContext &C, unsigned AS=0)
static Constant * get(ArrayType *T, ArrayRef< Constant *> V)
unsigned getBitWidth() const
Return the number of bits in the APInt.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Value * CreateNot(Value *V, const Twine &Name="")
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, unsigned Align, bool isVolatile=false)
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Value * getArgOperand(unsigned i) const
unsigned countTrailingZeros() const
Count the number of trailing zero bits.
static cl::opt< int > ClPoisonStackPattern("msan-poison-stack-pattern", cl::desc("poison uninitialized stack variables with the given pattern"), cl::Hidden, cl::init(0xff))
AnalysisUsage & addRequired()
#define INITIALIZE_PASS_DEPENDENCY(depName)
amdgpu Simplify well known AMD library false Value Value const Twine & Name
static cl::opt< bool > ClDumpStrictInstructions("msan-dump-strict-instructions", cl::desc("print out instructions with default strict semantics"), cl::Hidden, cl::init(false))
This class represents the LLVM 'select' instruction.
Type * getPointerElementType() const
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
FunctionPass * createMemorySanitizerLegacyPassPass(int TrackOrigins=0, bool Recover=false, bool EnableKmsan=false)
unsigned getAlignment() const
Return the alignment of the memory that is being allocated by the instruction.
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
This is the base class for all instructions that perform data casts.
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
'undef' values are things that do not have specified contents.
This class wraps the llvm.memmove intrinsic.
Class to represent struct types.
LLVMContext & getContext() const
Get the global data context.
static cl::opt< bool > ClCheckAccessAddress("msan-check-access-address", cl::desc("report accesses through a pointer which has poisoned shadow"), cl::Hidden, cl::init(true))
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
static cl::opt< unsigned long long > ClAndMask("msan-and-mask", cl::desc("Define custom MSan AndMask"), cl::Hidden, cl::init(0))
bool isIntegerTy() const
True if this is an instance of IntegerType.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
IntegerType * getIntPtrTy(const DataLayout &DL, unsigned AddrSpace=0)
Fetch the type representing a pointer to an integer value.
This file contains the simple types necessary to represent the attributes associated with functions a...
static cl::opt< unsigned long long > ClOriginBase("msan-origin-base", cl::desc("Define custom MSan OriginBase"), cl::Hidden, cl::init(0))
InstrTy * getInstruction() const
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
void setName(const Twine &Name)
Change the name of the value.
static StructType * get(LLVMContext &Context, ArrayRef< Type *> Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
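For illustration, a literal struct built with this factory; the two fields are invented for the example and are not the sanitizer's real TLS layout:

  LLVMContext &C = M.getContext();  // assumes an existing Module M
  Type *Fields[] = {Type::getInt64Ty(C), Type::getInt32Ty(C)};
  StructType *PairTy = StructType::get(C, Fields, /*isPacked=*/false);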
This file implements a class to represent arbitrary precision integral constant values and operations...
Type * getVoidTy()
Fetch the type representing void.
This class represents a cast from a pointer to an integer.
AtomicOrdering
Atomic ordering for LLVM's memory model.
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Class to represent function types.
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Type * getType() const
All values are typed; get the type of this value.
This represents the llvm.va_start intrinsic.
static const char *const kMsanInitName
std::string itostr(int64_t X)
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
static cl::opt< int > ClTrackOrigins("msan-track-origins", cl::desc("Track origins (allocation sites) of poisoned memory"), cl::Hidden, cl::init(0))
Track origins of uninitialized values.
Class to represent array types.
This instruction compares its operands according to the predicate given to the constructor.
static bool isStore(int Opcode)
void setComdat(Comdat *C)
This class represents a no-op cast from one type to another.
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Return true if the call or the callee has the given attribute.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Value * getInsertedValueOperand()
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
AttributeList getAttributes() const
Return the attribute list for this Function.
An instruction for storing to memory.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
static const unsigned kParamTLSSize
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="")
ConstraintPrefix Type
Type - The basic type of the constraint: input/output/clobber.
static cl::opt< bool > ClPoisonStack("msan-poison-stack", cl::desc("poison uninitialized stack variables"), cl::Hidden, cl::init(true))
Function * getDeclaration(Module *M, ID id, ArrayRef< Type *> Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
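A hedged example of materializing an overloaded intrinsic declaration and calling it; M, IRB, Dst, Src, and Len are assumed, and the four-operand llvm.memcpy form matches the IR used by LLVM 7 and later:

  Type *Tys[] = {IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt64Ty()};
  Function *MemCpyFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy, Tys);
  IRB.CreateCall(MemCpyFn, {Dst, Src, Len, IRB.getFalse()});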
This class represents a truncation of integer types.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block...
Value * getOperand(unsigned i) const
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
bool isCall() const
Return true if a CallInst is enclosed.
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
static const unsigned kMinOriginAlignment
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
bool isZeroValue() const
Return true if the value is negative zero or the null value.
bool isVoidTy() const
Return true if this is 'void'.
const BasicBlock & getEntryBlock() const
An instruction for type-safe pointer arithmetic to access elements of arrays and structs.
static cl::opt< unsigned long long > ClShadowBase("msan-shadow-base", cl::desc("Define custom MSan ShadowBase"), cl::Hidden, cl::init(0))
static bool runOnFunction(Function &F, bool PostInlining)
initializer< Ty > init(const Ty &Val)
This instruction inserts a single (scalar) element into a VectorType value.
The landingpad instruction holds all of the information necessary to generate correct exception handl...
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
A set of analyses that are preserved following a run of a transformation pass.
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
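A small sketch of the usual setup: point an IRBuilder at the first legal non-PHI position of a function's entry block (F is assumed to be a non-empty Function):

  BasicBlock &Entry = F.getEntryBlock();
  IRBuilder<> IRB(&Entry, Entry.getFirstInsertionPt());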
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
Value * getCalledValue() const
static Constant * getOrInsertGlobal(Module &M, StringRef Name, Type *Ty)
LLVM Basic Block Representation.
The instances of the Type class are immutable: once they are created, they are never changed...
This is an important class for using LLVM in a threaded context.
const char * getOpcodeName() const
This is an important base class in LLVM.
Resume the propagation of an exception.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
bool isPointerTy() const
True if this is an instance of PointerType.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Represent the analysis usage information of a pass.
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
static const PlatformMemoryMapParams Linux_X86_MemoryMapParams
This instruction compares its operands according to the predicate given to the constructor.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
FunctionPass class - This class is used to implement most global optimizations.
static const unsigned kShadowTLSAlignment
static FunctionType * get(Type *Result, ArrayRef< Type *> Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
static Constant * get(StructType *T, ArrayRef< Constant *> V)
Value * getPointerOperand()
bool isX86_MMXTy() const
Return true if this is X86 MMX.
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
self_iterator getIterator()
Class to represent integer types.
IntegerType * getIntNTy(unsigned N)
Fetch the type representing an N-bit integer.
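These integer-type helpers are the building blocks of an MSan-style "shadow type for a value type" mapping; a hedged sketch that follows the usual convention of an equally sized integer per element and ignores aggregate types:

  Type *shadowTyFor(IRBuilder<> &IRB, const DataLayout &DL, Type *ValTy) {
    if (VectorType *VT = dyn_cast<VectorType>(ValTy))
      return VectorType::get(IRB.getIntNTy(VT->getScalarSizeInBits()),
                             VT->getNumElements());
    return IRB.getIntNTy(static_cast<unsigned>(DL.getTypeSizeInBits(ValTy)));
  }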
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
This class represents a cast from an integer to a pointer.
const Value * getCondition() const
static Constant * getAllOnesValue(Type *Ty)
static const PlatformMemoryMapParams Linux_ARM_MemoryMapParams
static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams
Comdat * getOrInsertComdat(StringRef Name)
Return the Comdat in the module with the specified name.
INITIALIZE_PASS_BEGIN(MemorySanitizerLegacyPass, "msan", "MemorySanitizer: detects uninitialized reads.", false, false) INITIALIZE_PASS_END(MemorySanitizerLegacyPass
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
const Value * getArraySize() const
Get the number of elements allocated.
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
PointerType * getInt8PtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer to an 8-bit integer value.
MDNode * createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight)
Return metadata containing two branch weights.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="")
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Triple - Helper class for working with autoconf configuration names.
std::vector< ConstraintInfo > ConstraintInfoVector
bool isInvoke() const
Return true if a InvokeInst is enclosed.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateGEP(Value *Ptr, ArrayRef< Value *> IdxList, const Twine &Name="")
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Type * getSequentialElementType() const
Iterator for intrusive lists based on ilist_node.
unsigned getNumOperands() const
This is the shared class of boolean and integer constants.
auto size(R &&Range, typename std::enable_if< std::is_same< typename std::iterator_traits< decltype(Range.begin())>::iterator_category, std::random_access_iterator_tag >::value, void >::type *=nullptr) -> decltype(std::distance(Range.begin(), Range.end()))
Get the size of a range.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These methods get and set the calling convention of this function...
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Module.h This file contains the declarations for the Module class.
Value * CreateInsertElement(Value *Vec, Value *NewElt, Value *Idx, const Twine &Name="")
Provides information about what library functions are available for the current target.
unsigned getABITypeAlignment(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
bool isAggregateType() const
Return true if the type is an aggregate type.
CallInst * CreateMaskedStore(Value *Val, Value *Ptr, unsigned Align, Value *Mask)
Create a call to Masked Store intrinsic.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
StringRef str()
Return a StringRef for the vector contents.
CallInst * CreateMemCpy(Value *Dst, unsigned DstAlign, Value *Src, unsigned SrcAlign, uint64_t Size, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *TBAAStructTag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memcpy between the specified pointers.
This class wraps the llvm.memcpy intrinsic.
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
CallInst * CreateMaskedLoad(Value *Ptr, unsigned Align, Value *Mask, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Load intrinsic.
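The masked load/store helpers listed above are typically used as a pair, reading and writing shadow with the same mask and alignment as the original vector access; ShadowLoadPtr, ShadowStorePtr, Mask, and Alignment are placeholders:

  Value *ShadowVal = IRB.CreateMaskedLoad(ShadowLoadPtr, Alignment, Mask);
  IRB.CreateMaskedStore(ShadowVal, ShadowStorePtr, Alignment, Mask);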
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static const size_t kNumberOfAccessSizes
static GlobalVariable * createPrivateNonConstGlobalForString(Module &M, StringRef Str)
Create a non-const global initialized with the given string.
static cl::opt< bool > ClKeepGoing("msan-keep-going", cl::desc("keep going after reporting a UMR"), cl::Hidden, cl::init(false))
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
unsigned getVectorNumElements() const
Class to represent vector types.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
Class for arbitrary precision integers.
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
const Value * getFalseValue() const
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Instruction * SplitBlockAndInsertIfThen(Value *Cond, Instruction *SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DominatorTree *DT=nullptr, LoopInfo *LI=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
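This utility is the workhorse of the "compare shadow, branch to a cold report" shape; a hedged sketch in which Shadow and WarningFn are placeholders, the branch weights merely mark the report arm as cold, and IRBuilder.h/MDBuilder.h are assumed to be included:

  Value *Cmp = IRB.CreateICmpNE(Shadow, Constant::getNullValue(Shadow->getType()));
  MDBuilder MDB(IRB.getContext());
  Instruction *CheckTerm = SplitBlockAndInsertIfThen(
      Cmp, &*IRB.GetInsertPoint(), /*Unreachable=*/false,
      MDB.createBranchWeights(1, 100000));
  IRB.SetInsertPoint(CheckTerm);  // emit the report in the cold arm
  IRB.CreateCall(WarningFn, {});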
uint64_t getTypeSizeInBits(Type *Ty) const
Returns the number of bits necessary to hold the specified type (size examples are given in the full description).
uint64_t getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
void removeAttributes(unsigned i, const AttrBuilder &Attrs)
Removes the attributes from the list of attributes.
Predicate getPredicate() const
Return the predicate for this instruction.
unsigned getNumArgOperands() const
static cl::opt< bool > ClEnableKmsan("msan-kernel", cl::desc("Enable KernelMemorySanitizer instrumentation"), cl::Hidden, cl::init(false))
bool isInlineAsm() const
Check if this call is an inline asm statement.
Constant * getOrInsertGlobal(StringRef Name, Type *Ty, function_ref< GlobalVariable *()> CreateGlobalCallback)
Look up the specified global in the module symbol table.
static cl::opt< bool > ClHandleICmpExact("msan-handle-icmp-exact", cl::desc("exact handling of relational integer ICmp"), cl::Hidden, cl::init(false))
unsigned getAlignment() const
Return the alignment of the access that is being performed.
LLVM_NODISCARD bool empty() const
StringRef getName() const
Return a constant reference to the value's name.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation.
bool onlyReadsMemory(unsigned OpNo) const
static const MemoryMapParams Linux_X86_64_MemoryMapParams
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if...
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value *> Args=None, const Twine &Name="", MDNode *FPMathTag=nullptr)
static const PlatformMemoryMapParams NetBSD_X86_MemoryMapParams
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
static InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT)
InlineAsm::get - Return the specified uniqued inline asm string.
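As a deliberately generic example, a side-effecting empty asm callee of void() type, the kind of no-op barrier sanitizer passes sometimes emit so adjacent runtime calls are not merged; IRB is assumed:

  FunctionType *EmptyFnTy =
      FunctionType::get(IRB.getVoidTy(), {}, /*isVarArg=*/false);
  InlineAsm *EmptyAsm =
      InlineAsm::get(EmptyFnTy, "", "", /*hasSideEffects=*/true);
  IRB.CreateCall(EmptyFnTy, EmptyAsm, {});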
PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM)
static cl::opt< bool > ClCheckConstantShadow("msan-check-constant-shadow", cl::desc("Insert checks for constant shadow values"), cl::Hidden, cl::init(false))
static const MemoryMapParams Linux_MIPS64_MemoryMapParams
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
static const unsigned kOriginSize
const std::string to_string(const T &Value)
static const MemoryMapParams FreeBSD_I386_MemoryMapParams
Analysis pass providing the TargetLibraryInfo.
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
iterator_range< df_iterator< T > > depth_first(const T &G)
This represents the llvm.va_copy intrinsic.
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1...
size_type count(const KeyT &Val) const
Return 1 if the specified key is in the map, 0 otherwise.
static cl::opt< int > ClInstrumentationWithCallThreshold("msan-instrumentation-with-call-threshold", cl::desc("If the function being instrumented requires more than " "this number of checks and origin stores, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(3500))
unsigned getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
void setSuccessOrdering(AtomicOrdering Ordering)
Sets the success ordering constraint of this cmpxchg instruction.
ArrayRef< unsigned > getIndices() const
Module * getParent()
Get the module that this global value is contained inside of...
LLVM Value Representation.
uint64_t getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type...
FunctionType * getFunctionType() const
static VectorType * get(Type *ElementType, unsigned NumElements)
This static method is the primary way to construct an VectorType.
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned getArgumentNo(Value::const_user_iterator I) const
Given a value use iterator, returns the argument that corresponds to it.
AttributeSet getFnAttributes() const
The function attributes are returned.
BasicBlock::iterator GetInsertPoint() const
BasicBlock * SplitBlock(BasicBlock *Old, Instruction *SplitPt, DominatorTree *DT=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Split the specified block at the specified instruction - everything before SplitPt stays in Old and e...
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
static cl::opt< bool > ClPoisonUndef("msan-poison-undef", cl::desc("poison undef temps"), cl::Hidden, cl::init(true))
ConstantInt * getInt8(uint8_t C)
Get a constant 8-bit value.
StringRef - Represent a constant reference to a string, i.e.
Predicate getSwappedPredicate() const
Return the predicate as if the operands were swapped: for example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams
A container for analyses that lazily runs them and caches their results.
Type * getArrayElementType() const
Value * CreateInsertValue(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &Name="")
Value * getPointerOperand()
static IntegerType * getInt8Ty(LLVMContext &C)
static const char *const kMsanModuleCtorName
static Constant * get(ArrayRef< Constant *> V)
iterator_range< arg_iterator > args()
std::pair< Function *, Function * > getOrCreateSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type *> InitArgTypes, ArrayRef< Value *> InitArgs, function_ref< void(Function *, Function *)> FunctionsCreatedCallback, StringRef VersionCheckName=StringRef())
Creates sanitizer constructor function lazily.
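A hedged sketch of how this utility combines with appendToGlobalCtors to register a runtime init routine; the "example.module_ctor" and "__example_init" names are invented for illustration:

  auto CtorAndInit = getOrCreateSanitizerCtorAndInitFunctions(
      M, "example.module_ctor", "__example_init",
      /*InitArgTypes=*/{}, /*InitArgs=*/{},
      // Once both functions exist, add the ctor to llvm.global_ctors.
      [&](Function *Ctor, Function *) { appendToGlobalCtors(M, Ctor, 0); });
  (void)CtorAndInit;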
static ConstraintInfoVector ParseConstraints(StringRef ConstraintString)
ParseConstraints - Split up the constraint string into the specific constraints and their prefixes...
bool isArrayTy() const
True if this is an instance of ArrayType.
A wrapper class for inspecting calls to intrinsic functions.
const BasicBlock * getParent() const
An instruction to allocate memory on the stack.
This instruction inserts a struct field or array element value into an aggregate value.