Skip to content

Commit a1fba83

Browse files
committed
Map unmapped harvested memory before returning it to cache.
1 parent b8b028d commit a1fba83

File tree: 1 file changed (+12 additions, −1 deletion)

src/hotspot/share/gc/z/zPageAllocator.cpp

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -841,6 +841,8 @@ void ZPageAllocator::harvest_claimed_physical(ZPageAllocation* allocation) {
841841
segments.stash(allocation->claimed_mappings());
842842

843843
// Shuffle vmem. We allocate enough memory to cover the entire allocation size, not just the harvested memory.
844+
// If we fail to allocate additional virtual memory, the allocated virtual memory will match the harvested amount
845+
// instead of the allocation request.
844846
_virtual.shuffle_vmem_to_low_addresses_contiguous(allocation->size(), allocation->claimed_mappings());
845847

846848
// Restore segments
@@ -933,9 +935,18 @@ ZPage* ZPageAllocator::alloc_page_inner(ZPageAllocation* allocation) {
933935
allocation->claimed_mappings()->append(vmem);
934936
}
935937

936-
// Check if we've successfully gotten a large enough virtual address range
938+
// Check if we've successfully gotten a large enough virtual address range.
937939
if (!is_alloc_satisfied(allocation)) {
938940
log_error(gc)("Out of address space");
941+
942+
// The harvested memory has not been mapped yet. Map it before we put it back into the cache.
943+
if (allocation->harvested() > 0) {
944+
ZArrayIterator<ZMemoryRange> iter(allocation->claimed_mappings());
945+
for (ZMemoryRange vmem; iter.next(&vmem);) {
946+
map_virtual_to_physical(vmem);
947+
}
948+
}
949+
939950
free_memory_alloc_failed(allocation);
940951
return nullptr;
941952
}

0 commit comments

Comments (0)