MemoryCache::MemoryCache(Process &process)
    : m_mutex(), m_L1_cache(), m_L2_cache(), m_invalid_ranges(),
      m_process(process),
      m_L2_cache_line_byte_size(process.GetMemoryCacheLineSize()) {}
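// Clear both cache levels (and optionally the invalid-range list) and
// refresh the L2 cache line byte size from the process.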
void MemoryCache::Clear(bool clear_invalid_ranges) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_L1_cache.clear();
  m_L2_cache.clear();

  if (clear_invalid_ranges)
    m_invalid_ranges.Clear();

  m_L2_cache_line_byte_size = m_process.GetMemoryCacheLineSize();
}
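// L1 cache entries are blocks that were already read as a unit, with no size
// or alignment restrictions, keyed by their base address.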
void MemoryCache::AddL1CacheData(lldb::addr_t addr,
                                 const DataBufferSP &data_buffer_sp) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_L1_cache[addr] = data_buffer_sp;
}
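// Flush removes any cached data overlapping [addr, addr + size) from both
// cache levels so stale bytes are never returned after memory changes.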
void MemoryCache::Flush(addr_t addr, size_t size) {
  if (size == 0)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  // Erase any blocks from the L1 cache that intersect with the flush range.
  if (!m_L1_cache.empty()) {
    AddrRange flush_range(addr, size);
    BlockMap::iterator pos = m_L1_cache.upper_bound(addr);
    if (pos != m_L1_cache.begin()) {
      --pos;
      // The block before the flush range may end before it; if so, start
      // erasing at the following block.
      AddrRange chunk_range(pos->first, pos->second->GetByteSize());
      if (!chunk_range.DoesIntersect(flush_range))
        ++pos;
    }
    while (pos != m_L1_cache.end()) {
      AddrRange chunk_range(pos->first, pos->second->GetByteSize());
      if (!chunk_range.DoesIntersect(flush_range))
        break;
      pos = m_L1_cache.erase(pos);
    }
  }
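  // Next, erase every L2 cache line that overlaps the flush range. The line
  // count below is computed so it cannot wrap past the end of the 64-bit
  // address space.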
  if (!m_L2_cache.empty()) {
    const uint32_t cache_line_byte_size = m_L2_cache_line_byte_size;
    const addr_t end_addr = (addr + size - 1);
    const addr_t first_cache_line_addr = addr - (addr % cache_line_byte_size);
    const addr_t last_cache_line_addr =
        end_addr - (end_addr % cache_line_byte_size);
    uint32_t num_cache_lines;
    if (last_cache_line_addr >= first_cache_line_addr)
      num_cache_lines = ((last_cache_line_addr - first_cache_line_addr) /
                         cache_line_byte_size) +
                        1;
    else
      num_cache_lines =
          (UINT64_MAX - first_cache_line_addr + 1) / cache_line_byte_size;

    uint32_t cache_idx = 0;
    for (addr_t curr_addr = first_cache_line_addr; cache_idx < num_cache_lines;
         curr_addr += cache_line_byte_size, ++cache_idx) {
      BlockMap::iterator pos = m_L2_cache.find(curr_addr);
      if (pos != m_L2_cache.end())
        m_L2_cache.erase(pos);
    }
  }
}
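// Invalid ranges mark memory that is known to be unreadable, so Read() can
// fail fast instead of repeatedly asking the inferior.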
void MemoryCache::AddInvalidRange(lldb::addr_t base_addr,
                                  lldb::addr_t byte_size) {
  if (byte_size > 0) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    InvalidRanges::Entry range(base_addr, byte_size);
    m_invalid_ranges.Append(range);
    m_invalid_ranges.Sort();
  }
}
bool MemoryCache::RemoveInvalidRange(lldb::addr_t base_addr,
                                     lldb::addr_t byte_size) {
  if (byte_size > 0) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    const uint32_t idx = m_invalid_ranges.FindEntryIndexThatContains(base_addr);
    if (idx != UINT32_MAX) {
      const InvalidRanges::Entry *entry = m_invalid_ranges.GetEntryAtIndex(idx);
      if (entry->GetRangeBase() == base_addr &&
          entry->GetByteSize() == byte_size)
        return m_invalid_ranges.RemoveEntryAtIndex(idx);
    }
  }
  return false;
}
size_t MemoryCache::Read(addr_t addr, void *dst, size_t dst_len,
                         Status &error) {
  size_t bytes_left = dst_len;
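  // Check the L1 cache first for a block that contains the entire request.
  // L1 blocks can be any size, so only whole-request hits are served from
  // them; anything else falls through to the line-sized L2 cache below.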
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  if (!m_L1_cache.empty()) {
    AddrRange read_range(addr, dst_len);
    BlockMap::iterator pos = m_L1_cache.upper_bound(addr);
    if (pos != m_L1_cache.begin())
      --pos;
    AddrRange chunk_range(pos->first, pos->second->GetByteSize());
    if (chunk_range.Contains(read_range)) {
      memcpy(dst, pos->second->GetBytes() + (addr - chunk_range.GetRangeBase()),
             dst_len);
      return dst_len;
    }
  }
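  // Reads larger than a cache line bypass the L2 cache: issue one big read
  // from the inferior and stash the result in the L1 cache instead of
  // splitting it into lines.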
  if (dst && dst_len > m_L2_cache_line_byte_size) {
    size_t bytes_read =
        m_process.ReadMemoryFromInferior(addr, dst, dst_len, error);
    if (bytes_read > 0)
      AddL1CacheData(addr, dst, bytes_read);
    return bytes_read;
  }

  // Otherwise, service the read through the L2 cache one line at a time.
  if (dst && bytes_left > 0) {
    const uint32_t cache_line_byte_size = m_L2_cache_line_byte_size;
    uint8_t *dst_buf = (uint8_t *)dst;
    addr_t curr_addr = addr - (addr % cache_line_byte_size);
    addr_t cache_offset = addr - curr_addr;
    while (bytes_left > 0) {
      // If the address is in an invalid range, abort the read.
      if (m_invalid_ranges.FindEntryThatContains(curr_addr)) {
        error.SetErrorStringWithFormat("memory read failed for 0x%" PRIx64,
                                       curr_addr);
        return dst_len - bytes_left;
      }
      BlockMap::const_iterator pos = m_L2_cache.find(curr_addr);
      BlockMap::const_iterator end = m_L2_cache.end();

      if (pos != end) {
        size_t curr_read_size = cache_line_byte_size - cache_offset;
        if (curr_read_size > bytes_left)
          curr_read_size = bytes_left;

        memcpy(dst_buf + dst_len - bytes_left,
               pos->second->GetBytes() + cache_offset, curr_read_size);

        bytes_left -= curr_read_size;
        curr_addr += curr_read_size + cache_offset;
        cache_offset = 0;
        if (bytes_left > 0) {
          // Get sequential cache page hits.
          for (++pos; (pos != end) && (bytes_left > 0); ++pos) {
            assert((curr_addr % cache_line_byte_size) == 0);

            if (pos->first != curr_addr)
              break;

            curr_read_size = pos->second->GetByteSize();
            if (curr_read_size > bytes_left)
              curr_read_size = bytes_left;

            memcpy(dst_buf + dst_len - bytes_left, pos->second->GetBytes(),
                   curr_read_size);

            bytes_left -= curr_read_size;
            curr_addr += curr_read_size;

            // A partial cache line means we ran past the end of readable
            // memory, so cap the read here.
            if (pos->second->GetByteSize() != cache_line_byte_size)
              return dst_len - bytes_left;
          }
        }
      }
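      // Cache miss: read one full line from the inferior, install it in the
      // L2 cache, and let the loop copy it out on the next pass.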
      if (bytes_left > 0) {
        assert((curr_addr % cache_line_byte_size) == 0);
        std::unique_ptr<DataBufferHeap> data_buffer_heap_up(
            new DataBufferHeap(cache_line_byte_size, 0));
        size_t process_bytes_read = m_process.ReadMemoryFromInferior(
            curr_addr, data_buffer_heap_up->GetBytes(),
            data_buffer_heap_up->GetByteSize(), error);
        if (process_bytes_read == 0)
          return dst_len - bytes_left;

        if (process_bytes_read != cache_line_byte_size) {
          // A short read means we hit the end of readable memory; cap the
          // request so we only report bytes that actually exist.
          if (process_bytes_read < data_buffer_heap_up->GetByteSize()) {
            dst_len -= data_buffer_heap_up->GetByteSize() - process_bytes_read;
            bytes_left = process_bytes_read;
          }
          data_buffer_heap_up->SetByteSize(process_bytes_read);
        }
        m_L2_cache[curr_addr] = DataBufferSP(data_buffer_heap_up.release());
      }
    }
  }

  return dst_len - bytes_left;
}
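// AllocatedBlock tracks one block of memory allocated in the inferior and
// parcels it out in chunk_size-sized pieces.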
AllocatedBlock::AllocatedBlock(lldb::addr_t addr, uint32_t byte_size,
                               uint32_t permissions, uint32_t chunk_size)
    : m_range(addr, byte_size), m_permissions(permissions),
      m_chunk_size(chunk_size) {
  // The entire block starts out as a single free range.
  m_free_blocks.Append(m_range);
  assert(byte_size > chunk_size);
}
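// Reserve `size` bytes, rounded up to whole chunks, from the first free
// range large enough to hold them (first fit).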
lldb::addr_t AllocatedBlock::ReserveBlock(uint32_t size) {
  // We must return something valid for zero bytes.
  if (size == 0)
    size = 1;
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));

  const size_t free_count = m_free_blocks.GetSize();
  for (size_t i = 0; i < free_count; ++i) {
    auto &free_block = m_free_blocks.GetEntryRef(i);
    const lldb::addr_t range_size = free_block.GetByteSize();
    if (range_size >= size) {
      // Figure out how many chunks we need and the resulting block size.
      addr_t addr = free_block.GetRangeBase();
      size_t num_chunks = CalculateChunksNeededForSize(size);
      lldb::addr_t block_size = num_chunks * m_chunk_size;
      lldb::addr_t bytes_left = range_size - block_size;
      if (bytes_left == 0) {
        // The reservation consumes the entire free range.
        m_reserved_blocks.Insert(free_block, false);
        m_free_blocks.RemoveEntryAtIndex(i);
      } else {
        // Split the free range: reserve the front, keep the tail free.
        Range<lldb::addr_t, uint32_t> reserved_block(free_block);
        reserved_block.SetByteSize(block_size);
        m_reserved_blocks.Insert(reserved_block, false);
        free_block.SetRangeBase(reserved_block.GetRangeEnd());
        free_block.SetByteSize(bytes_left);
      }
      LLDB_LOGV(log, "({0}) (size = {1} ({1:x})) => {2:x}", this, size, addr);
      return addr;
    }
  }
  LLDB_LOGV(log, "({0}) (size = {1} ({1:x})) => {2:x}", this, size,
            LLDB_INVALID_ADDRESS);
  return LLDB_INVALID_ADDRESS;
}
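// Return a previously reserved sub-block to the free list, coalescing it
// with adjacent free ranges.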
bool AllocatedBlock::FreeBlock(addr_t addr) {
  bool success = false;
  auto entry_idx = m_reserved_blocks.FindEntryIndexThatContains(addr);
  if (entry_idx != UINT32_MAX) {
    m_free_blocks.Insert(m_reserved_blocks.GetEntryRef(entry_idx), true);
    m_reserved_blocks.RemoveEntryAtIndex(entry_idx);
    success = true;
  }
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  LLDB_LOGV(log, "({0}) (addr = {1:x}) => {2}", this, addr, success);
  return success;
}
AllocatedMemoryCache::AllocatedMemoryCache(Process &process)
    : m_process(process), m_mutex(), m_memory_map() {}
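// Clear deallocates every cached block in the inferior (if it is still
// alive) and drops all bookkeeping.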
void AllocatedMemoryCache::Clear() {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  if (m_process.IsAlive()) {
    PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
    for (pos = m_memory_map.begin(); pos != end; ++pos)
      m_process.DoDeallocateMemory(pos->second->GetBaseAddress());
  }
  m_memory_map.clear();
}
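// Allocate whole pages in the inferior and wrap them in an AllocatedBlock
// that sub-allocates chunk_size-aligned pieces, keyed by permissions.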
AllocatedMemoryCache::AllocatedBlockSP
AllocatedMemoryCache::AllocatePage(uint32_t byte_size, uint32_t permissions,
                                   uint32_t chunk_size, Status &error) {
  AllocatedBlockSP block_sp;
  const size_t page_size = 4096;
  const size_t num_pages = (byte_size + page_size - 1) / page_size;
  const size_t page_byte_size = num_pages * page_size;

  addr_t addr = m_process.DoAllocateMemory(page_byte_size, permissions, error);

  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  LLDB_LOGF(log,
            "Process::DoAllocateMemory (byte_size = 0x%8.8" PRIx32
            ", permissions = %s) => 0x%16.16" PRIx64,
            (uint32_t)page_byte_size, GetPermissionsAsCString(permissions),
            (uint64_t)addr);

  if (addr != LLDB_INVALID_ADDRESS) {
    block_sp = std::make_shared<AllocatedBlock>(addr, page_byte_size,
                                                permissions, chunk_size);
    m_memory_map.insert(std::make_pair(permissions, block_sp));
  }
  return block_sp;
}
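// Try to reserve space in an existing block with matching permissions;
// only allocate a fresh page from the inferior when none has room.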
lldb::addr_t AllocatedMemoryCache::AllocateMemory(size_t byte_size,
                                                  uint32_t permissions,
                                                  Status &error) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  addr_t addr = LLDB_INVALID_ADDRESS;
  std::pair<PermissionsToBlockMap::iterator, PermissionsToBlockMap::iterator>
      range = m_memory_map.equal_range(permissions);

  for (PermissionsToBlockMap::iterator pos = range.first; pos != range.second;
       ++pos) {
    addr = (*pos).second->ReserveBlock(byte_size);
    if (addr != LLDB_INVALID_ADDRESS)
      break;
  }

  if (addr == LLDB_INVALID_ADDRESS) {
    AllocatedBlockSP block_sp(AllocatePage(byte_size, permissions, 16, error));
    if (block_sp)
      addr = block_sp->ReserveBlock(byte_size);
  }
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  LLDB_LOGF(log,
            "AllocatedMemoryCache::AllocateMemory (byte_size = 0x%8.8" PRIx32
            ", permissions = %s) => 0x%16.16" PRIx64,
            (uint32_t)byte_size, GetPermissionsAsCString(permissions),
            (uint64_t)addr);
  return addr;
}
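// Find the block containing addr and return it to that block's free list.
// The underlying page stays allocated in the inferior for reuse.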
bool AllocatedMemoryCache::DeallocateMemory(lldb::addr_t addr) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
  bool success = false;
  for (pos = m_memory_map.begin(); pos != end; ++pos) {
    if (pos->second->Contains(addr)) {
      success = pos->second->FreeBlock(addr);
      break;
    }
  }
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  LLDB_LOGF(log,
            "AllocatedMemoryCache::DeallocateMemory (addr = 0x%16.16" PRIx64
            ") => %i",
            (uint64_t)addr, success);
  return success;
}