LLDB  mainline
Memory.cpp
Go to the documentation of this file.
1 //===-- Memory.cpp --------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
#include "lldb/Target/Memory.h"
#include "lldb/Target/Process.h"
#include "lldb/Utility/DataBufferHeap.h"
#include "lldb/Utility/LLDBLog.h"
#include "lldb/Utility/Log.h"
#include "lldb/Utility/RangeMap.h"
#include "lldb/Utility/State.h"

#include <cinttypes>
#include <memory>
19 
20 using namespace lldb;
21 using namespace lldb_private;
22 
23 // MemoryCache constructor
24 MemoryCache::MemoryCache(Process &process)
25  : m_mutex(), m_L1_cache(), m_L2_cache(), m_invalid_ranges(),
26  m_process(process),
27  m_L2_cache_line_byte_size(process.GetMemoryCacheLineSize()) {}
28 
29 // Destructor
30 MemoryCache::~MemoryCache() = default;
31 
32 void MemoryCache::Clear(bool clear_invalid_ranges) {
33  std::lock_guard<std::recursive_mutex> guard(m_mutex);
34  m_L1_cache.clear();
35  m_L2_cache.clear();
36  if (clear_invalid_ranges)
39 }
40 
41 void MemoryCache::AddL1CacheData(lldb::addr_t addr, const void *src,
42  size_t src_len) {
44  addr, DataBufferSP(new DataBufferHeap(DataBufferHeap(src, src_len))));
45 }
46 
48  const DataBufferSP &data_buffer_sp) {
49  std::lock_guard<std::recursive_mutex> guard(m_mutex);
50  m_L1_cache[addr] = data_buffer_sp;
51 }
52 
53 void MemoryCache::Flush(addr_t addr, size_t size) {
54  if (size == 0)
55  return;
56 
57  std::lock_guard<std::recursive_mutex> guard(m_mutex);
58 
59  // Erase any blocks from the L1 cache that intersect with the flush range
60  if (!m_L1_cache.empty()) {
61  AddrRange flush_range(addr, size);
62  BlockMap::iterator pos = m_L1_cache.upper_bound(addr);
63  if (pos != m_L1_cache.begin()) {
64  --pos;
65  }
66  while (pos != m_L1_cache.end()) {
67  AddrRange chunk_range(pos->first, pos->second->GetByteSize());
68  if (!chunk_range.DoesIntersect(flush_range))
69  break;
70  pos = m_L1_cache.erase(pos);
71  }
72  }
73 
74  if (!m_L2_cache.empty()) {
75  const uint32_t cache_line_byte_size = m_L2_cache_line_byte_size;
76  const addr_t end_addr = (addr + size - 1);
77  const addr_t first_cache_line_addr = addr - (addr % cache_line_byte_size);
78  const addr_t last_cache_line_addr =
79  end_addr - (end_addr % cache_line_byte_size);
80  // Watch for overflow where size will cause us to go off the end of the
81  // 64 bit address space
82  uint32_t num_cache_lines;
83  if (last_cache_line_addr >= first_cache_line_addr)
84  num_cache_lines = ((last_cache_line_addr - first_cache_line_addr) /
85  cache_line_byte_size) +
86  1;
87  else
88  num_cache_lines =
89  (UINT64_MAX - first_cache_line_addr + 1) / cache_line_byte_size;
90 
91  uint32_t cache_idx = 0;
92  for (addr_t curr_addr = first_cache_line_addr; cache_idx < num_cache_lines;
93  curr_addr += cache_line_byte_size, ++cache_idx) {
94  BlockMap::iterator pos = m_L2_cache.find(curr_addr);
95  if (pos != m_L2_cache.end())
96  m_L2_cache.erase(pos);
97  }
98  }
99 }
100 
102  lldb::addr_t byte_size) {
103  if (byte_size > 0) {
104  std::lock_guard<std::recursive_mutex> guard(m_mutex);
105  InvalidRanges::Entry range(base_addr, byte_size);
106  m_invalid_ranges.Append(range);
108  }
109 }
110 
112  lldb::addr_t byte_size) {
113  if (byte_size > 0) {
114  std::lock_guard<std::recursive_mutex> guard(m_mutex);
116  if (idx != UINT32_MAX) {
118  if (entry->GetRangeBase() == base_addr &&
119  entry->GetByteSize() == byte_size)
121  }
122  }
123  return false;
124 }
125 
126 size_t MemoryCache::Read(addr_t addr, void *dst, size_t dst_len,
127  Status &error) {
128  size_t bytes_left = dst_len;
129 
130  // Check the L1 cache for a range that contain the entire memory read. If we
131  // find a range in the L1 cache that does, we use it. Else we fall back to
132  // reading memory in m_L2_cache_line_byte_size byte sized chunks. The L1
133  // cache contains chunks of memory that are not required to be
134  // m_L2_cache_line_byte_size bytes in size, so we don't try anything tricky
135  // when reading from them (no partial reads from the L1 cache).
136 
137  std::lock_guard<std::recursive_mutex> guard(m_mutex);
138  if (!m_L1_cache.empty()) {
139  AddrRange read_range(addr, dst_len);
140  BlockMap::iterator pos = m_L1_cache.upper_bound(addr);
141  if (pos != m_L1_cache.begin()) {
142  --pos;
143  }
144  AddrRange chunk_range(pos->first, pos->second->GetByteSize());
145  if (chunk_range.Contains(read_range)) {
146  memcpy(dst, pos->second->GetBytes() + (addr - chunk_range.GetRangeBase()),
147  dst_len);
148  return dst_len;
149  }
150  }
151 
152  // If this memory read request is larger than the cache line size, then we
153  // (1) try to read as much of it at once as possible, and (2) don't add the
154  // data to the memory cache. We don't want to split a big read up into more
155  // separate reads than necessary, and with a large memory read request, it is
156  // unlikely that the caller function will ask for the next
157  // 4 bytes after the large memory read - so there's little benefit to saving
158  // it in the cache.
159  if (dst && dst_len > m_L2_cache_line_byte_size) {
160  size_t bytes_read =
161  m_process.ReadMemoryFromInferior(addr, dst, dst_len, error);
162  // Add this non block sized range to the L1 cache if we actually read
163  // anything
164  if (bytes_read > 0)
165  AddL1CacheData(addr, dst, bytes_read);
166  return bytes_read;
167  }
168 
169  if (dst && bytes_left > 0) {
170  const uint32_t cache_line_byte_size = m_L2_cache_line_byte_size;
171  uint8_t *dst_buf = (uint8_t *)dst;
172  addr_t curr_addr = addr - (addr % cache_line_byte_size);
173  addr_t cache_offset = addr - curr_addr;
174 
175  while (bytes_left > 0) {
176  if (m_invalid_ranges.FindEntryThatContains(curr_addr)) {
177  error.SetErrorStringWithFormat("memory read failed for 0x%" PRIx64,
178  curr_addr);
179  return dst_len - bytes_left;
180  }
181 
182  BlockMap::const_iterator pos = m_L2_cache.find(curr_addr);
183  BlockMap::const_iterator end = m_L2_cache.end();
184 
185  if (pos != end) {
186  size_t curr_read_size = cache_line_byte_size - cache_offset;
187  if (curr_read_size > bytes_left)
188  curr_read_size = bytes_left;
189 
190  memcpy(dst_buf + dst_len - bytes_left,
191  pos->second->GetBytes() + cache_offset, curr_read_size);
192 
193  bytes_left -= curr_read_size;
194  curr_addr += curr_read_size + cache_offset;
195  cache_offset = 0;
196 
197  if (bytes_left > 0) {
198  // Get sequential cache page hits
199  for (++pos; (pos != end) && (bytes_left > 0); ++pos) {
200  assert((curr_addr % cache_line_byte_size) == 0);
201 
202  if (pos->first != curr_addr)
203  break;
204 
205  curr_read_size = pos->second->GetByteSize();
206  if (curr_read_size > bytes_left)
207  curr_read_size = bytes_left;
208 
209  memcpy(dst_buf + dst_len - bytes_left, pos->second->GetBytes(),
210  curr_read_size);
211 
212  bytes_left -= curr_read_size;
213  curr_addr += curr_read_size;
214 
215  // We have a cache page that succeeded to read some bytes but not
216  // an entire page. If this happens, we must cap off how much data
217  // we are able to read...
218  if (pos->second->GetByteSize() != cache_line_byte_size)
219  return dst_len - bytes_left;
220  }
221  }
222  }
223 
224  // We need to read from the process
225 
226  if (bytes_left > 0) {
227  assert((curr_addr % cache_line_byte_size) == 0);
228  std::unique_ptr<DataBufferHeap> data_buffer_heap_up(
229  new DataBufferHeap(cache_line_byte_size, 0));
230  size_t process_bytes_read = m_process.ReadMemoryFromInferior(
231  curr_addr, data_buffer_heap_up->GetBytes(),
232  data_buffer_heap_up->GetByteSize(), error);
233  if (process_bytes_read == 0)
234  return dst_len - bytes_left;
235 
236  if (process_bytes_read != cache_line_byte_size) {
237  if (process_bytes_read < data_buffer_heap_up->GetByteSize()) {
238  dst_len -= data_buffer_heap_up->GetByteSize() - process_bytes_read;
239  bytes_left = process_bytes_read;
240  }
241  data_buffer_heap_up->SetByteSize(process_bytes_read);
242  }
243  m_L2_cache[curr_addr] = DataBufferSP(data_buffer_heap_up.release());
244  // We have read data and put it into the cache, continue through the
245  // loop again to get the data out of the cache...
246  }
247  }
248  }
249 
250  return dst_len - bytes_left;
251 }
252 
254  uint32_t permissions, uint32_t chunk_size)
255  : m_range(addr, byte_size), m_permissions(permissions),
256  m_chunk_size(chunk_size)
257 {
258  // The entire address range is free to start with.
260  assert(byte_size > chunk_size);
261 }
262 
264 
266  // We must return something valid for zero bytes.
267  if (size == 0)
268  size = 1;
269  Log *log = GetLog(LLDBLog::Process);
270 
271  const size_t free_count = m_free_blocks.GetSize();
272  for (size_t i=0; i<free_count; ++i)
273  {
274  auto &free_block = m_free_blocks.GetEntryRef(i);
275  const lldb::addr_t range_size = free_block.GetByteSize();
276  if (range_size >= size)
277  {
278  // We found a free block that is big enough for our data. Figure out how
279  // many chunks we will need and calculate the resulting block size we
280  // will reserve.
281  addr_t addr = free_block.GetRangeBase();
282  size_t num_chunks = CalculateChunksNeededForSize(size);
283  lldb::addr_t block_size = num_chunks * m_chunk_size;
284  lldb::addr_t bytes_left = range_size - block_size;
285  if (bytes_left == 0)
286  {
287  // The newly allocated block will take all of the bytes in this
288  // available block, so we can just add it to the allocated ranges and
289  // remove the range from the free ranges.
290  m_reserved_blocks.Insert(free_block, false);
292  }
293  else
294  {
295  // Make the new allocated range and add it to the allocated ranges.
296  Range<lldb::addr_t, uint32_t> reserved_block(free_block);
297  reserved_block.SetByteSize(block_size);
298  // Insert the reserved range and don't combine it with other blocks in
299  // the reserved blocks list.
300  m_reserved_blocks.Insert(reserved_block, false);
301  // Adjust the free range in place since we won't change the sorted
302  // ordering of the m_free_blocks list.
303  free_block.SetRangeBase(reserved_block.GetRangeEnd());
304  free_block.SetByteSize(bytes_left);
305  }
306  LLDB_LOGV(log, "({0}) (size = {1} ({1:x})) => {2:x}", this, size, addr);
307  return addr;
308  }
309  }
310 
311  LLDB_LOGV(log, "({0}) (size = {1} ({1:x})) => {2:x}", this, size,
313  return LLDB_INVALID_ADDRESS;
314 }
315 
317  bool success = false;
318  auto entry_idx = m_reserved_blocks.FindEntryIndexThatContains(addr);
319  if (entry_idx != UINT32_MAX)
320  {
323  success = true;
324  }
325  Log *log = GetLog(LLDBLog::Process);
326  LLDB_LOGV(log, "({0}) (addr = {1:x}) => {2}", this, addr, success);
327  return success;
328 }
329 
331  : m_process(process), m_mutex(), m_memory_map() {}
332 
334 
336  std::lock_guard<std::recursive_mutex> guard(m_mutex);
337  if (m_process.IsAlive()) {
338  PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
339  for (pos = m_memory_map.begin(); pos != end; ++pos)
340  m_process.DoDeallocateMemory(pos->second->GetBaseAddress());
341  }
342  m_memory_map.clear();
343 }
344 
347  uint32_t chunk_size, Status &error) {
348  AllocatedBlockSP block_sp;
349  const size_t page_size = 4096;
350  const size_t num_pages = (byte_size + page_size - 1) / page_size;
351  const size_t page_byte_size = num_pages * page_size;
352 
353  addr_t addr = m_process.DoAllocateMemory(page_byte_size, permissions, error);
354 
355  Log *log = GetLog(LLDBLog::Process);
356  if (log) {
357  LLDB_LOGF(log,
358  "Process::DoAllocateMemory (byte_size = 0x%8.8" PRIx32
359  ", permissions = %s) => 0x%16.16" PRIx64,
360  (uint32_t)page_byte_size, GetPermissionsAsCString(permissions),
361  (uint64_t)addr);
362  }
363 
364  if (addr != LLDB_INVALID_ADDRESS) {
365  block_sp = std::make_shared<AllocatedBlock>(addr, page_byte_size,
366  permissions, chunk_size);
367  m_memory_map.insert(std::make_pair(permissions, block_sp));
368  }
369  return block_sp;
370 }
371 
373  uint32_t permissions,
374  Status &error) {
375  std::lock_guard<std::recursive_mutex> guard(m_mutex);
376 
378  std::pair<PermissionsToBlockMap::iterator, PermissionsToBlockMap::iterator>
379  range = m_memory_map.equal_range(permissions);
380 
381  for (PermissionsToBlockMap::iterator pos = range.first; pos != range.second;
382  ++pos) {
383  addr = (*pos).second->ReserveBlock(byte_size);
384  if (addr != LLDB_INVALID_ADDRESS)
385  break;
386  }
387 
388  if (addr == LLDB_INVALID_ADDRESS) {
389  AllocatedBlockSP block_sp(AllocatePage(byte_size, permissions, 16, error));
390 
391  if (block_sp)
392  addr = block_sp->ReserveBlock(byte_size);
393  }
394  Log *log = GetLog(LLDBLog::Process);
395  LLDB_LOGF(log,
396  "AllocatedMemoryCache::AllocateMemory (byte_size = 0x%8.8" PRIx32
397  ", permissions = %s) => 0x%16.16" PRIx64,
398  (uint32_t)byte_size, GetPermissionsAsCString(permissions),
399  (uint64_t)addr);
400  return addr;
401 }
402 
404  std::lock_guard<std::recursive_mutex> guard(m_mutex);
405 
406  PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
407  bool success = false;
408  for (pos = m_memory_map.begin(); pos != end; ++pos) {
409  if (pos->second->Contains(addr)) {
410  success = pos->second->FreeBlock(addr);
411  break;
412  }
413  }
414  Log *log = GetLog(LLDBLog::Process);
415  LLDB_LOGF(log,
416  "AllocatedMemoryCache::DeallocateMemory (addr = 0x%16.16" PRIx64
417  ") => %i",
418  (uint64_t)addr, success);
419  return success;
420 }
lldb_private::AllocatedBlock::ReserveBlock
lldb::addr_t ReserveBlock(uint32_t size)
Definition: Memory.cpp:265
lldb_private::Range::GetRangeBase
BaseType GetRangeBase() const
Definition: RangeMap.h:46
lldb_private::AllocatedMemoryCache::~AllocatedMemoryCache
~AllocatedMemoryCache()
lldb_private::Range::DoesIntersect
bool DoesIntersect(const Range &rhs) const
Definition: RangeMap.h:112
lldb_private::RangeVector::FindEntryIndexThatContains
uint32_t FindEntryIndexThatContains(B addr) const
Definition: RangeMap.h:311
lldb_private::RangeVector::Sort
void Sort()
Definition: RangeMap.h:212
lldb_private::Process::DoAllocateMemory
virtual lldb::addr_t DoAllocateMemory(size_t size, uint32_t permissions, Status &error)
Actually allocate memory in the process.
Definition: Process.h:1633
lldb_private::MemoryCache::m_L1_cache
BlockMap m_L1_cache
Definition: Memory.h:52
lldb_private::MemoryCache::m_process
Process & m_process
Definition: Memory.h:58
lldb_private::AllocatedBlock::AllocatedBlock
AllocatedBlock(lldb::addr_t addr, uint32_t byte_size, uint32_t permissions, uint32_t chunk_size)
Definition: Memory.cpp:253
lldb_private::MemoryCache::RemoveInvalidRange
bool RemoveInvalidRange(lldb::addr_t base_addr, lldb::addr_t byte_size)
Definition: Memory.cpp:111
LLDB_LOGF
#define LLDB_LOGF(log,...)
Definition: Log.h:343
lldb_private::Process
Definition: Process.h:338
lldb_private::AllocatedBlock::m_chunk_size
const uint32_t m_chunk_size
Definition: Memory.h:102
lldb_private::RangeVector::GetEntryRef
Entry & GetEntryRef(size_t i)
Definition: RangeMap.h:298
lldb_private::AllocatedMemoryCache::DeallocateMemory
bool DeallocateMemory(lldb::addr_t ptr)
Definition: Memory.cpp:403
lldb_private::Process::IsAlive
virtual bool IsAlive()
Check if a process is still alive.
Definition: Process.cpp:1084
lldb_private::AllocatedMemoryCache::AllocatedMemoryCache
AllocatedMemoryCache(Process &process)
Definition: Memory.cpp:330
lldb_private::AllocatedBlock::m_reserved_blocks
RangeVector< lldb::addr_t, uint32_t > m_reserved_blocks
Definition: Memory.h:106
lldb::addr_t
uint64_t addr_t
Definition: lldb-types.h:83
lldb_private::RangeVector::Clear
void Clear()
Definition: RangeMap.h:284
RangeMap.h
LLDB_LOGV
#define LLDB_LOGV(log,...)
Definition: Log.h:350
lldb_private::MemoryCache::Flush
void Flush(lldb::addr_t addr, size_t size)
Definition: Memory.cpp:53
lldb_private::RangeVector::Append
void Append(const Entry &entry)
Definition: RangeMap.h:174
lldb_private::GetPermissionsAsCString
const char * GetPermissionsAsCString(uint32_t permissions)
Definition: State.cpp:44
lldb_private::Process::ReadMemoryFromInferior
size_t ReadMemoryFromInferior(lldb::addr_t vm_addr, void *buf, size_t size, Status &error)
Read of memory from a process.
Definition: Process.cpp:2033
Process.h
error
static llvm::raw_ostream & error(Stream &strm)
Definition: CommandReturnObject.cpp:17
lldb_private::MemoryCache::m_mutex
std::recursive_mutex m_mutex
Definition: Memory.h:51
Log.h
lldb_private::AllocatedMemoryCache::m_process
Process & m_process
Definition: Memory.h:133
lldb_private::AllocatedBlock::CalculateChunksNeededForSize
uint32_t CalculateChunksNeededForSize(uint32_t size) const
Definition: Memory.h:94
lldb_private::LLDBLog::Process
@ Process
lldb_private::Range< lldb::addr_t, lldb::addr_t >
lldb_private::AllocatedBlock::m_range
Range< lldb::addr_t, uint32_t > m_range
Definition: Memory.h:98
lldb_private::RangeVector::Insert
void Insert(const Entry &entry, bool combine)
Definition: RangeMap.h:180
lldb_private::MemoryCache::AddInvalidRange
void AddInvalidRange(lldb::addr_t base_addr, lldb::addr_t byte_size)
Definition: Memory.cpp:101
lldb_private::AllocatedMemoryCache::m_mutex
std::recursive_mutex m_mutex
Definition: Memory.h:134
lldb_private::AllocatedBlock::FreeBlock
bool FreeBlock(lldb::addr_t addr)
Definition: Memory.cpp:316
lldb_private::AllocatedMemoryCache::AllocateMemory
lldb::addr_t AllocateMemory(size_t byte_size, uint32_t permissions, Status &error)
Definition: Memory.cpp:372
lldb_private::MemoryCache::m_invalid_ranges
InvalidRanges m_invalid_ranges
Definition: Memory.h:57
lldb_private::Status
Definition: Status.h:44
lldb_private::MemoryCache::~MemoryCache
~MemoryCache()
uint32_t
lldb_private::RangeVector::RemoveEntryAtIndex
bool RemoveEntryAtIndex(uint32_t idx)
Definition: RangeMap.h:204
lldb_private::AllocatedMemoryCache::AllocatePage
AllocatedBlockSP AllocatePage(uint32_t byte_size, uint32_t permissions, uint32_t chunk_size, Status &error)
Definition: Memory.cpp:346
lldb_private::MemoryCache::Read
size_t Read(lldb::addr_t addr, void *dst, size_t dst_len, Status &error)
Definition: Memory.cpp:126
Memory.h
lldb_private::AllocatedMemoryCache::m_memory_map
PermissionsToBlockMap m_memory_map
Definition: Memory.h:136
lldb_private::MemoryCache::m_L2_cache_line_byte_size
uint32_t m_L2_cache_line_byte_size
Definition: Memory.h:59
lldb_private::MemoryCache::m_L2_cache
BlockMap m_L2_cache
Definition: Memory.h:55
lldb_private::Range::GetByteSize
SizeType GetByteSize() const
Definition: RangeMap.h:82
UINT32_MAX
#define UINT32_MAX
Definition: lldb-defines.h:19
lldb_private::Range::SetByteSize
void SetByteSize(SizeType s)
Definition: RangeMap.h:84
LLDB_INVALID_ADDRESS
#define LLDB_INVALID_ADDRESS
Definition: lldb-defines.h:74
lldb_private::MemoryCache::AddL1CacheData
void AddL1CacheData(lldb::addr_t addr, const void *src, size_t src_len)
Definition: Memory.cpp:41
lldb_private::Process::DoDeallocateMemory
virtual Status DoDeallocateMemory(lldb::addr_t ptr)
Actually deallocate memory in the process.
Definition: Process.h:1886
lldb_private::ProcessProperties::GetMemoryCacheLineSize
uint64_t GetMemoryCacheLineSize() const
Definition: Process.cpp:196
lldb_private
A class that represents a running process on the host machine.
Definition: SBCommandInterpreterRunOptions.h:16
lldb_private::AllocatedBlock::m_free_blocks
RangeVector< lldb::addr_t, uint32_t > m_free_blocks
Definition: Memory.h:104
lldb_private::AllocatedMemoryCache::Clear
void Clear()
Definition: Memory.cpp:335
lldb_private::AllocatedBlock::~AllocatedBlock
~AllocatedBlock()
State.h
lldb_private::RangeVector::FindEntryThatContains
const Entry * FindEntryThatContains(B addr) const
Definition: RangeMap.h:333
lldb_private::MemoryCache::Clear
void Clear(bool clear_invalid_ranges=false)
Definition: Memory.cpp:32
lldb_private::Log
Definition: Log.h:115
lldb_private::GetLog
Log * GetLog(Cat mask)
Retrieve the Log object for the channel associated with the given log enum.
Definition: Log.h:308
lldb_private::DataBufferHeap
Definition: DataBufferHeap.h:30
lldb_private::Range::Contains
bool Contains(BaseType r) const
Definition: RangeMap.h:88
lldb
Definition: SBAddress.h:15
LLDBLog.h
DataBufferHeap.h
UINT64_MAX
#define UINT64_MAX
Definition: lldb-defines.h:23
lldb_private::RangeVector::GetEntryAtIndex
const Entry * GetEntryAtIndex(size_t i) const
Definition: RangeMap.h:292
lldb_private::AllocatedMemoryCache::AllocatedBlockSP
std::shared_ptr< AllocatedBlock > AllocatedBlockSP
Definition: Memory.h:127
lldb_private::RangeVector::GetSize
size_t GetSize() const
Definition: RangeMap.h:290
lldb_private::Range::GetRangeEnd
BaseType GetRangeEnd() const
Definition: RangeMap.h:73