LLVM 8.0.1
SectionMemoryManager.cpp
//===- SectionMemoryManager.cpp - Memory manager for MCJIT/RtDyld *- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the section-based memory manager used by the MCJIT
// execution engine and RuntimeDyld
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/Config/config.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Process.h"

namespace llvm {

uint8_t *SectionMemoryManager::allocateDataSection(uintptr_t Size,
                                                   unsigned Alignment,
                                                   unsigned SectionID,
                                                   StringRef SectionName,
                                                   bool IsReadOnly) {
  if (IsReadOnly)
    return allocateSection(SectionMemoryManager::AllocationPurpose::ROData,
                           Size, Alignment);
  return allocateSection(SectionMemoryManager::AllocationPurpose::RWData, Size,
                         Alignment);
}

uint8_t *SectionMemoryManager::allocateCodeSection(uintptr_t Size,
                                                   unsigned Alignment,
                                                   unsigned SectionID,
                                                   StringRef SectionName) {
  return allocateSection(SectionMemoryManager::AllocationPurpose::Code, Size,
                         Alignment);
}

uint8_t *SectionMemoryManager::allocateSection(
    SectionMemoryManager::AllocationPurpose Purpose, uintptr_t Size,
    unsigned Alignment) {
  if (!Alignment)
    Alignment = 16;

  assert(!(Alignment & (Alignment - 1)) && "Alignment must be a power of two.");

  uintptr_t RequiredSize = Alignment * ((Size + Alignment - 1) / Alignment + 1);
  uintptr_t Addr = 0;

  MemoryGroup &MemGroup = [&]() -> MemoryGroup & {
    switch (Purpose) {
    case AllocationPurpose::Code:
      return CodeMem;
    case AllocationPurpose::ROData:
      return RODataMem;
    case AllocationPurpose::RWData:
      return RWDataMem;
    }
    llvm_unreachable("Unknown SectionMemoryManager::AllocationPurpose");
  }();

  // Look in the list of free memory regions and use a block there if one
  // is available.
  for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
    if (FreeMB.Free.size() >= RequiredSize) {
      Addr = (uintptr_t)FreeMB.Free.base();
      uintptr_t EndOfBlock = Addr + FreeMB.Free.size();
      // Align the address.
      Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);

      if (FreeMB.PendingPrefixIndex == (unsigned)-1) {
        // The part of the block we're giving out to the user is now pending
        MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));

        // Remember this pending block, such that future allocations can just
        // modify it rather than creating a new one
        FreeMB.PendingPrefixIndex = MemGroup.PendingMem.size() - 1;
      } else {
        sys::MemoryBlock &PendingMB =
            MemGroup.PendingMem[FreeMB.PendingPrefixIndex];
        PendingMB = sys::MemoryBlock(PendingMB.base(),
                                     Addr + Size - (uintptr_t)PendingMB.base());
      }

      // Remember how much free space is now left in this block
      FreeMB.Free =
          sys::MemoryBlock((void *)(Addr + Size), EndOfBlock - Addr - Size);
      return (uint8_t *)Addr;
    }
  }

  // No pre-allocated free block was large enough. Allocate a new memory region.
  // Note that all sections get allocated as read-write. The permissions will
  // be updated later based on memory group.
  //
  // FIXME: It would be useful to define a default allocation size (or add
  // it as a constructor parameter) to minimize the number of allocations.
  //
  // FIXME: Initialize the Near member for each memory group to avoid
  // interleaving.
  std::error_code ec;
  sys::MemoryBlock MB = MMapper.allocateMappedMemory(
      Purpose, RequiredSize, &MemGroup.Near,
      sys::Memory::MF_READ | sys::Memory::MF_WRITE, ec);
  if (ec) {
    // FIXME: Add error propagation to the interface.
    return nullptr;
  }

  // Save this address as the basis for our next request
  MemGroup.Near = MB;

  // Remember that we allocated this memory
  MemGroup.AllocatedMem.push_back(MB);
  Addr = (uintptr_t)MB.base();
  uintptr_t EndOfBlock = Addr + MB.size();

  // Align the address.
  Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);

  // The part of the block we're giving out to the user is now pending
  MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));

  // The allocateMappedMemory may allocate much more memory than we need. In
  // this case, we store the unused memory as a free memory block.
  unsigned FreeSize = EndOfBlock - Addr - Size;
  if (FreeSize > 16) {
    FreeMemBlock FreeMB;
    FreeMB.Free = sys::MemoryBlock((void *)(Addr + Size), FreeSize);
    FreeMB.PendingPrefixIndex = (unsigned)-1;
    MemGroup.FreeMem.push_back(FreeMB);
  }

  // Return aligned address
  return (uint8_t *)Addr;
}

bool SectionMemoryManager::finalizeMemory(std::string *ErrMsg) {
  // FIXME: Should in-progress permissions be reverted if an error occurs?
  std::error_code ec;

  // Make code memory executable.
  ec = applyMemoryGroupPermissions(CodeMem,
                                   sys::Memory::MF_READ | sys::Memory::MF_EXEC);
  if (ec) {
    if (ErrMsg) {
      *ErrMsg = ec.message();
    }
    return true;
  }

  // Make read-only data memory read-only.
  ec = applyMemoryGroupPermissions(RODataMem,
                                   sys::Memory::MF_READ | sys::Memory::MF_EXEC);
  if (ec) {
    if (ErrMsg) {
      *ErrMsg = ec.message();
    }
    return true;
  }

  // Read-write data memory already has the correct permissions

  // Some platforms with separate data cache and instruction cache require
  // explicit cache flush, otherwise JIT code manipulations (like resolved
  // relocations) will get to the data cache but not to the instruction cache.
  invalidateInstructionCache();

  return false;
}

static sys::MemoryBlock trimBlockToPageSize(sys::MemoryBlock M) {
  static const size_t PageSize = sys::Process::getPageSize();

  size_t StartOverlap =
      (PageSize - ((uintptr_t)M.base() % PageSize)) % PageSize;

  size_t TrimmedSize = M.size();
  TrimmedSize -= StartOverlap;
  TrimmedSize -= TrimmedSize % PageSize;

  sys::MemoryBlock Trimmed((void *)((uintptr_t)M.base() + StartOverlap),
                           TrimmedSize);

  assert(((uintptr_t)Trimmed.base() % PageSize) == 0);
  assert((Trimmed.size() % PageSize) == 0);
  assert(M.base() <= Trimmed.base() && Trimmed.size() <= M.size());

  return Trimmed;
}

std::error_code
SectionMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,
                                                  unsigned Permissions) {
  for (sys::MemoryBlock &MB : MemGroup.PendingMem)
    if (std::error_code EC = MMapper.protectMappedMemory(MB, Permissions))
      return EC;

  MemGroup.PendingMem.clear();

  // Now go through free blocks and trim any of them that don't span the entire
  // page because one of the pending blocks may have overlapped it.
  for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
    FreeMB.Free = trimBlockToPageSize(FreeMB.Free);
    // We cleared the PendingMem list, so all these pointers are now invalid
    FreeMB.PendingPrefixIndex = (unsigned)-1;
  }

  // Remove all blocks which are now empty
  MemGroup.FreeMem.erase(
      remove_if(MemGroup.FreeMem,
                [](FreeMemBlock &FreeMB) { return FreeMB.Free.size() == 0; }),
      MemGroup.FreeMem.end());

  return std::error_code();
}

void SectionMemoryManager::invalidateInstructionCache() {
  for (sys::MemoryBlock &Block : CodeMem.PendingMem)
    sys::Memory::InvalidateInstructionCache(Block.base(), Block.size());
}

SectionMemoryManager::~SectionMemoryManager() {
  for (MemoryGroup *Group : {&CodeMem, &RWDataMem, &RODataMem}) {
    for (sys::MemoryBlock &Block : Group->AllocatedMem)
      MMapper.releaseMappedMemory(Block);
  }
}

SectionMemoryManager::MemoryMapper::~MemoryMapper() {}

void SectionMemoryManager::anchor() {}

namespace {
// Trivial implementation of SectionMemoryManager::MemoryMapper that just calls
// into sys::Memory.
class DefaultMMapper final : public SectionMemoryManager::MemoryMapper {
public:
  sys::MemoryBlock
  allocateMappedMemory(SectionMemoryManager::AllocationPurpose Purpose,
                       size_t NumBytes, const sys::MemoryBlock *const NearBlock,
                       unsigned Flags, std::error_code &EC) override {
    return sys::Memory::allocateMappedMemory(NumBytes, NearBlock, Flags, EC);
  }

  std::error_code protectMappedMemory(const sys::MemoryBlock &Block,
                                      unsigned Flags) override {
    return sys::Memory::protectMappedMemory(Block, Flags);
  }

  std::error_code releaseMappedMemory(sys::MemoryBlock &M) override {
    return sys::Memory::releaseMappedMemory(M);
  }
};

DefaultMMapper DefaultMMapperInstance;
} // namespace

SectionMemoryManager::SectionMemoryManager(MemoryMapper *MM)
    : MMapper(MM ? *MM : DefaultMMapperInstance) {}

} // namespace llvm
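
SectionMemoryManager is rarely driven by hand; it is normally handed to MCJIT through EngineBuilder, which calls allocateCodeSection/allocateDataSection while loading an object and finalizeMemory when the engine finalizes it. The following is a minimal usage sketch (not part of this file), assuming an LLVM 8-era MCJIT build; the module name "jit_module" and the elided IR are placeholders.

#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/MCJIT.h"
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/TargetSelect.h"
#include <memory>
#include <string>

int main() {
  // Register the host target so MCJIT can emit native code.
  llvm::InitializeNativeTarget();
  llvm::InitializeNativeTargetAsmPrinter();

  llvm::LLVMContext Ctx;
  std::unique_ptr<llvm::Module> M(new llvm::Module("jit_module", Ctx));
  // ... populate M with IR here (elided) ...

  std::string Err;
  llvm::ExecutionEngine *EE =
      llvm::EngineBuilder(std::move(M))
          .setErrorStr(&Err)
          .setMCJITMemoryManager(
              std::unique_ptr<llvm::SectionMemoryManager>(
                  new llvm::SectionMemoryManager()))
          .create();
  if (!EE)
    return 1;

  // finalizeObject() drives RuntimeDyld finalization, which in turn calls
  // SectionMemoryManager::finalizeMemory() to apply the per-group
  // permissions set up by allocateSection() above.
  EE->finalizeObject();

  // Once the module contains code, JITed symbols can be looked up with
  // EE->getFunctionAddress("...").
  delete EE;
  return 0;
}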