25 : m_mutex(), m_L1_cache(), m_L2_cache(), m_invalid_ranges(),
27 m_L2_cache_line_byte_size(process.GetMemoryCacheLineSize()) {}
33 std::lock_guard<std::recursive_mutex> guard(
m_mutex);
36 if (clear_invalid_ranges)
49 std::lock_guard<std::recursive_mutex> guard(
m_mutex);
57 std::lock_guard<std::recursive_mutex> guard(
m_mutex);
62 BlockMap::iterator pos =
m_L1_cache.upper_bound(addr);
67 AddrRange chunk_range(pos->first, pos->second->GetByteSize());
76 const addr_t end_addr = (addr + size - 1);
77 const addr_t first_cache_line_addr = addr - (addr % cache_line_byte_size);
78 const addr_t last_cache_line_addr =
79 end_addr - (end_addr % cache_line_byte_size);
82 uint32_t num_cache_lines;
83 if (last_cache_line_addr >= first_cache_line_addr)
84 num_cache_lines = ((last_cache_line_addr - first_cache_line_addr) /
85 cache_line_byte_size) +
89 (
UINT64_MAX - first_cache_line_addr + 1) / cache_line_byte_size;
91 uint32_t cache_idx = 0;
92 for (
addr_t curr_addr = first_cache_line_addr; cache_idx < num_cache_lines;
93 curr_addr += cache_line_byte_size, ++cache_idx) {
94 BlockMap::iterator pos =
m_L2_cache.find(curr_addr);
104 std::lock_guard<std::recursive_mutex> guard(
m_mutex);
114 std::lock_guard<std::recursive_mutex> guard(
m_mutex);
131 std::lock_guard<std::recursive_mutex> guard(
m_mutex);
136 auto data_buffer_heap_sp =
139 line_base_addr, data_buffer_heap_sp->GetBytes(),
140 data_buffer_heap_sp->GetByteSize(),
error);
143 if (process_bytes_read == 0)
148 data_buffer_heap_sp->SetByteSize(process_bytes_read);
150 m_L2_cache[line_base_addr] = data_buffer_heap_sp;
151 return data_buffer_heap_sp;
156 if (!dst || dst_len == 0)
159 std::lock_guard<std::recursive_mutex> guard(
m_mutex);
167 error.SetErrorStringWithFormat(
"memory read failed for 0x%" PRIx64, addr);
177 BlockMap::iterator pos =
m_L1_cache.upper_bound(addr);
181 AddrRange chunk_range(pos->first, pos->second->GetByteSize());
182 if (chunk_range.
Contains(read_range)) {
183 memcpy(dst, pos->second->GetBytes() + (addr - chunk_range.
GetRangeBase()),
207 addr_t cache_line_base_addr = addr - cache_line_offset;
211 if (!first_cache_line)
216 if (cache_line_offset >= first_cache_line->GetByteSize())
219 uint8_t *dst_buf = (uint8_t *)dst;
220 size_t bytes_left = dst_len;
221 size_t read_size = first_cache_line->GetByteSize() - cache_line_offset;
222 if (read_size > bytes_left)
223 read_size = bytes_left;
225 memcpy(dst_buf + dst_len - bytes_left,
226 first_cache_line->GetBytes() + cache_line_offset, read_size);
227 bytes_left -= read_size;
233 return dst_len - bytes_left;
236 if (bytes_left > 0) {
243 error.SetErrorStringWithFormat(
"memory read failed for 0x%" PRIx64,
244 cache_line_base_addr);
245 return dst_len - bytes_left;
250 if (!second_cache_line)
251 return dst_len - bytes_left;
253 read_size = bytes_left;
254 if (read_size > second_cache_line->GetByteSize())
255 read_size = second_cache_line->GetByteSize();
257 memcpy(dst_buf + dst_len - bytes_left, second_cache_line->GetBytes(),
259 bytes_left -= read_size;
261 return dst_len - bytes_left;
268 uint32_t permissions, uint32_t chunk_size)
269 : m_range(addr, byte_size), m_permissions(permissions),
270 m_chunk_size(chunk_size)
274 assert(byte_size > chunk_size);
286 for (
size_t i=0; i<free_count; ++i)
290 if (range_size >= size)
295 addr_t addr = free_block.GetRangeBase();
317 free_block.SetRangeBase(reserved_block.
GetRangeEnd());
318 free_block.SetByteSize(bytes_left);
320 LLDB_LOGV(log,
"({0}) (size = {1} ({1:x})) => {2:x}",
this, size, addr);
325 LLDB_LOGV(log,
"({0}) (size = {1} ({1:x})) => {2:x}",
this, size,
331 bool success =
false;
340 LLDB_LOGV(log,
"({0}) (addr = {1:x}) => {2}",
this, addr, success);
345 : m_process(process), m_mutex(), m_memory_map() {}
350 std::lock_guard<std::recursive_mutex> guard(
m_mutex);
352 PermissionsToBlockMap::iterator pos, end =
m_memory_map.end();
363 const size_t page_size = 4096;
364 const size_t num_pages = (byte_size + page_size - 1) / page_size;
365 const size_t page_byte_size = num_pages * page_size;
372 "Process::DoAllocateMemory (byte_size = 0x%8.8" PRIx32
373 ", permissions = %s) => 0x%16.16" PRIx64,
379 block_sp = std::make_shared<AllocatedBlock>(addr, page_byte_size,
380 permissions, chunk_size);
381 m_memory_map.insert(std::make_pair(permissions, block_sp));
387 uint32_t permissions,
389 std::lock_guard<std::recursive_mutex> guard(
m_mutex);
392 std::pair<PermissionsToBlockMap::iterator, PermissionsToBlockMap::iterator>
395 for (PermissionsToBlockMap::iterator pos = range.first; pos != range.second;
397 addr = (*pos).second->ReserveBlock(byte_size);
406 addr = block_sp->ReserveBlock(byte_size);
410 "AllocatedMemoryCache::AllocateMemory (byte_size = 0x%8.8" PRIx32
411 ", permissions = %s) => 0x%16.16" PRIx64,
418 std::lock_guard<std::recursive_mutex> guard(
m_mutex);
420 PermissionsToBlockMap::iterator pos, end =
m_memory_map.end();
421 bool success =
false;
423 if (pos->second->Contains(addr)) {
424 success = pos->second->FreeBlock(addr);
430 "AllocatedMemoryCache::DeallocateMemory (addr = 0x%16.16" PRIx64
432 (uint64_t)addr, success);
static llvm::raw_ostream & error(Stream &strm)
#define LLDB_LOGF(log,...)
#define LLDB_LOGV(log,...)
uint32_t CalculateChunksNeededForSize(uint32_t size) const
bool FreeBlock(lldb::addr_t addr)
lldb::addr_t ReserveBlock(uint32_t size)
Range< lldb::addr_t, uint32_t > m_range
AllocatedBlock(lldb::addr_t addr, uint32_t byte_size, uint32_t permissions, uint32_t chunk_size)
RangeVector< lldb::addr_t, uint32_t > m_free_blocks
RangeVector< lldb::addr_t, uint32_t > m_reserved_blocks
const uint32_t m_chunk_size
lldb::addr_t AllocateMemory(size_t byte_size, uint32_t permissions, Status &error)
AllocatedMemoryCache(Process &process)
std::recursive_mutex m_mutex
void Clear(bool deallocate_memory)
bool DeallocateMemory(lldb::addr_t ptr)
std::shared_ptr< AllocatedBlock > AllocatedBlockSP
PermissionsToBlockMap m_memory_map
AllocatedBlockSP AllocatePage(uint32_t byte_size, uint32_t permissions, uint32_t chunk_size, Status &error)
A subclass of DataBuffer that stores a data buffer on the heap.
MemoryCache(Process &process)
std::recursive_mutex m_mutex
bool RemoveInvalidRange(lldb::addr_t base_addr, lldb::addr_t byte_size)
void Flush(lldb::addr_t addr, size_t size)
void AddL1CacheData(lldb::addr_t addr, const void *src, size_t src_len)
InvalidRanges m_invalid_ranges
void Clear(bool clear_invalid_ranges=false)
size_t Read(lldb::addr_t addr, void *dst, size_t dst_len, Status &error)
void AddInvalidRange(lldb::addr_t base_addr, lldb::addr_t byte_size)
lldb::DataBufferSP GetL2CacheLine(lldb::addr_t addr, Status &error)
uint32_t m_L2_cache_line_byte_size
uint64_t GetMemoryCacheLineSize() const
A plug-in interface definition class for debugging a process.
size_t ReadMemoryFromInferior(lldb::addr_t vm_addr, void *buf, size_t size, Status &error)
Read memory from a process.
virtual bool IsAlive()
Check if a process is still alive.
virtual Status DoDeallocateMemory(lldb::addr_t ptr)
Actually deallocate memory in the process.
virtual lldb::addr_t DoAllocateMemory(size_t size, uint32_t permissions, Status &error)
Actually allocate memory in the process.
const Entry * FindEntryThatContains(B addr) const
uint32_t FindEntryIndexThatContains(B addr) const
const Entry * GetEntryAtIndex(size_t i) const
Entry & GetEntryRef(size_t i)
bool RemoveEntryAtIndex(uint32_t idx)
void Append(const Entry &entry)
void Insert(const Entry &entry, bool combine)
#define LLDB_INVALID_ADDRESS
A class that represents a running process on the host machine.
Log * GetLog(Cat mask)
Retrieve the Log object for the channel associated with the given log enum.
const char * GetPermissionsAsCString(uint32_t permissions)
std::shared_ptr< lldb_private::DataBuffer > DataBufferSP
bool Contains(BaseType r) const
BaseType GetRangeBase() const
SizeType GetByteSize() const
BaseType GetRangeEnd() const
bool DoesIntersect(const Range &rhs) const
void SetByteSize(SizeType s)