Skip to content

Commit 7848b0a

Browse files
committed
Cleanup improvements in ZPageAllocator for better readability
1 parent 845804c commit 7848b0a

File tree

2 files changed

+89
-69
lines changed

2 files changed

+89
-69
lines changed

src/hotspot/share/gc/z/zPageAllocator.cpp

Lines changed: 81 additions & 65 deletions
Original file line numberDiff line numberDiff line change
@@ -606,6 +606,10 @@ void ZPageAllocator::sort_segments_physical(const ZMemoryRange& vmem) {
606606
sort_zoffset_ptrs(_physical_mappings.get_addr(vmem.start()), vmem.size_in_granules());
607607
}
608608

609+
void ZPageAllocator::alloc_physical(const ZMemoryRange& vmem, int numa_id) {
610+
_physical.alloc(_physical_mappings.get_addr(vmem.start()), vmem.size(), numa_id);
611+
}
612+
609613
void ZPageAllocator::free_physical(const ZMemoryRange& vmem, int numa_id) {
610614
// Free physical memory
611615
_physical.free(_physical_mappings.get_addr(vmem.start()), vmem.size(), numa_id);
@@ -681,6 +685,43 @@ void ZPageAllocator::remap_and_defragment_mapping(const ZMemoryRange& vmem, ZArr
681685
}
682686
}
683687

688+
static void check_out_of_memory_during_initialization() {
689+
if (!is_init_completed()) {
690+
vm_exit_during_initialization("java.lang.OutOfMemoryError", "Java heap too small");
691+
}
692+
}
693+
694+
bool ZPageAllocator::alloc_page_stall(ZPageAllocation* allocation) {
695+
ZStatTimer timer(ZCriticalPhaseAllocationStall);
696+
EventZAllocationStall event;
697+
698+
// We can only block if the VM is fully initialized
699+
check_out_of_memory_during_initialization();
700+
701+
// Start asynchronous minor GC
702+
const ZDriverRequest request(GCCause::_z_allocation_stall, ZYoungGCThreads, 0);
703+
ZDriver::minor()->collect(request);
704+
705+
// Wait for allocation to complete or fail
706+
const bool result = allocation->wait();
707+
708+
{
709+
// Guard deletion of underlying semaphore. This is a workaround for
710+
// a bug in sem_post() in glibc < 2.21, where it's not safe to destroy
711+
// the semaphore immediately after returning from sem_wait(). The
712+
// reason is that sem_post() can touch the semaphore after a waiting
713+
// thread has returned from sem_wait(). To avoid this race we are
714+
// forcing the waiting thread to acquire/release the lock held by the
715+
// posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
716+
ZLocker<ZLock> locker(&_lock);
717+
}
718+
719+
// Send event
720+
event.commit((u8)allocation->type(), allocation->size());
721+
722+
return result;
723+
}
724+
684725
bool ZPageAllocator::claim_mapped_or_increase_capacity(ZCacheState& state, ZPageAllocation* allocation) {
685726
ZMappedCache& cache = state._cache;
686727
const size_t size = allocation->size();
@@ -764,43 +805,6 @@ bool ZPageAllocator::claim_physical_round_robin(ZPageAllocation* allocation) {
764805
return false;
765806
}
766807

767-
static void check_out_of_memory_during_initialization() {
768-
if (!is_init_completed()) {
769-
vm_exit_during_initialization("java.lang.OutOfMemoryError", "Java heap too small");
770-
}
771-
}
772-
773-
bool ZPageAllocator::alloc_page_stall(ZPageAllocation* allocation) {
774-
ZStatTimer timer(ZCriticalPhaseAllocationStall);
775-
EventZAllocationStall event;
776-
777-
// We can only block if the VM is fully initialized
778-
check_out_of_memory_during_initialization();
779-
780-
// Start asynchronous minor GC
781-
const ZDriverRequest request(GCCause::_z_allocation_stall, ZYoungGCThreads, 0);
782-
ZDriver::minor()->collect(request);
783-
784-
// Wait for allocation to complete or fail
785-
const bool result = allocation->wait();
786-
787-
{
788-
// Guard deletion of underlying semaphore. This is a workaround for
789-
// a bug in sem_post() in glibc < 2.21, where it's not safe to destroy
790-
// the semaphore immediately after returning from sem_wait(). The
791-
// reason is that sem_post() can touch the semaphore after a waiting
792-
// thread have returned from sem_wait(). To avoid this race we are
793-
// forcing the waiting thread to acquire/release the lock held by the
794-
// posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
795-
ZLocker<ZLock> locker(&_lock);
796-
}
797-
798-
// Send event
799-
event.commit((u8)allocation->type(), allocation->size());
800-
801-
return result;
802-
}
803-
804808
bool ZPageAllocator::claim_physical_or_stall(ZPageAllocation* allocation) {
805809
{
806810
ZLocker<ZLock> locker(&_lock);
@@ -872,6 +876,36 @@ bool ZPageAllocator::is_alloc_satisfied(ZPageAllocation* allocation) const {
872876
return true;
873877
}
874878

879+
bool ZPageAllocator::claim_virtual_memory(ZPageAllocation* allocation) {
880+
if (allocation->harvested() > 0) {
881+
// If we have harvested anything, we claim virtual memory from the harvested
882+
// mappings, and perhaps also allocate more to match the allocation request.
883+
harvest_claimed_physical(allocation);
884+
} else {
885+
// If we have not harvested anything, we have only increased capacity.
886+
// Allocate new virtual memory from the manager.
887+
ZMemoryRange vmem = _virtual.alloc(allocation->size(), allocation->numa_id(), true /* force_low_address */);
888+
allocation->claimed_mappings()->append(vmem);
889+
}
890+
891+
// If we have enough virtual memory to cover the allocation request,
892+
// we're done.
893+
if (is_alloc_satisfied(allocation)) {
894+
return true;
895+
}
896+
897+
// Before returning the harvested memory to the cache it must be mapped.
898+
if (allocation->harvested() > 0) {
899+
ZArrayIterator<ZMemoryRange> iter(allocation->claimed_mappings());
900+
for (ZMemoryRange vmem; iter.next(&vmem);) {
901+
map_virtual_to_physical(vmem);
902+
}
903+
}
904+
905+
// Failed to allocate enough virtual memory from the manager.
906+
return false;
907+
}
908+
875909
bool ZPageAllocator::commit_and_map_memory(ZPageAllocation* allocation, const ZMemoryRange& vmem, size_t committed_size) {
876910
ZMemoryRange to_be_committed_vmem = vmem;
877911
ZMemoryRange committed_vmem = to_be_committed_vmem.split_from_front(committed_size);
@@ -907,6 +941,7 @@ bool ZPageAllocator::commit_and_map_memory(ZPageAllocation* allocation, const ZM
907941
return true;
908942
}
909943

944+
910945
ZPage* ZPageAllocator::alloc_page_inner(ZPageAllocation* allocation) {
911946
retry:
912947
// Claim physical memory by taking it from the mapped cache or by increasing
@@ -918,48 +953,30 @@ ZPage* ZPageAllocator::alloc_page_inner(ZPageAllocation* allocation) {
918953
return nullptr;
919954
}
920955

921-
// If the claimed physical memory holds a large enough contiguous virtual
922-
// address range, we're done.
956+
// If we have claimed a large enough contiguous mapping from the cache,
957+
// we're done.
923958
if (is_alloc_satisfied(allocation)) {
924959
ZMemoryRange vmem = allocation->claimed_mappings()->pop();
925960
return new ZPage(allocation->type(), vmem);
926961
}
927962

928-
if (allocation->harvested() > 0) {
929-
// We allocate virtual memory while harvesting into a contiguous mapping
930-
// since we potentially re-use the virtual address of the harvested memory.
931-
harvest_claimed_physical(allocation);
932-
} else {
933-
// Only increased capacity, nothing harvested. Allocate new virtual memory.
934-
ZMemoryRange vmem = _virtual.alloc(allocation->size(), allocation->numa_id(), true /* force_low_address */);
935-
allocation->claimed_mappings()->append(vmem);
936-
}
937-
938-
// Check if we've successfully gotten a large enough virtual address range.
939-
if (!is_alloc_satisfied(allocation)) {
963+
// Claim virtual memory, either by harvesting or by allocating new from the
964+
// virtual manager.
965+
if (!claim_virtual_memory(allocation)) {
940966
log_error(gc)("Out of address space");
941-
942-
// The harvested memory has not been mapped yet. Map it before we put it back into the cache.
943-
if (allocation->harvested() > 0) {
944-
ZArrayIterator<ZMemoryRange> iter(allocation->claimed_mappings());
945-
for (ZMemoryRange vmem; iter.next(&vmem);) {
946-
map_virtual_to_physical(vmem);
947-
}
948-
}
949-
950967
free_memory_alloc_failed(allocation);
951968
return nullptr;
952969
}
953970

954971
ZMemoryRange vmem = allocation->claimed_mappings()->pop();
955-
const size_t remaining_physical = allocation->size() - allocation->harvested();
956972

957973
// Allocate any remaining physical memory. Capacity and used has already been
958974
// adjusted, we just need to fetch the memory, which is guaranteed to succeed.
975+
const size_t remaining_physical = allocation->size() - allocation->harvested();
959976
if (remaining_physical > 0) {
960977
allocation->set_committed(remaining_physical);
961-
zoffset* mapping_addr = _physical_mappings.get_addr(vmem.start() + allocation->harvested());
962-
_physical.alloc(mapping_addr, remaining_physical, allocation->numa_id());
978+
ZMemoryRange uncommitted_range = ZMemoryRange(vmem.start() + allocation->harvested(), remaining_physical);
979+
alloc_physical(uncommitted_range, allocation->numa_id());
963980
}
964981

965982
if (!commit_and_map_memory(allocation, vmem, allocation->harvested())) {
@@ -975,8 +992,7 @@ void ZPageAllocator::alloc_page_age_update(ZPage* page, size_t size, ZPageAge ag
975992
// to the allocating thread. The overall heap "used" is tracked in
976993
// the lower-level allocation code.
977994
const ZGenerationId id = age == ZPageAge::old ? ZGenerationId::old : ZGenerationId::young;
978-
ZCacheState& state = _states.get(numa_id);
979-
state.increase_used_generation(id, size);
995+
_states.get(numa_id).increase_used_generation(id, size);
980996

981997
// Reset page. This updates the page's sequence number and must
982998
// be done after we potentially blocked in a safepoint (stalled)

src/hotspot/share/gc/z/zPageAllocator.hpp

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -73,6 +73,7 @@ class ZPageAllocator {
7373
size_t count_segments_physical(const ZMemoryRange& vmem);
7474
void sort_segments_physical(const ZMemoryRange& vmem);
7575

76+
void alloc_physical(const ZMemoryRange& vmem, int numa_id);
7677
void free_physical(const ZMemoryRange& vmem, int numa_id);
7778
bool commit_physical(ZMemoryRange* vmem, int numa_id);
7879
void uncommit_physical(const ZMemoryRange& vmem);
@@ -85,19 +86,22 @@ class ZPageAllocator {
8586
void remap_and_defragment_mapping(const ZMemoryRange& mapping, ZArray<ZMemoryRange>* entries);
8687
void prepare_memory_for_free(ZPage* page, ZArray<ZMemoryRange>* entries, bool allow_defragment);
8788

89+
bool alloc_page_stall(ZPageAllocation* allocation);
90+
8891
bool claim_mapped_or_increase_capacity(ZCacheState& state, ZPageAllocation* allocation);
8992
bool claim_physical(ZPageAllocation* allocation, ZCacheState& state);
9093
bool claim_physical_round_robin(ZPageAllocation* allocation);
91-
bool alloc_page_stall(ZPageAllocation* allocation);
9294
bool claim_physical_or_stall(ZPageAllocation* allocation);
95+
96+
void harvest_claimed_physical(ZPageAllocation* allocation);
9397
bool is_alloc_satisfied(ZPageAllocation* allocation) const;
98+
bool claim_virtual_memory(ZPageAllocation* allocation);
99+
100+
bool commit_and_map_memory(ZPageAllocation* allocation, const ZMemoryRange& vmem, size_t committed_size);
94101

95102
ZPage* alloc_page_inner(ZPageAllocation* allocation);
96103
void alloc_page_age_update(ZPage* page, size_t size, ZPageAge age, int numa_id);
97104

98-
void harvest_claimed_physical(ZPageAllocation* allocation);
99-
100-
bool commit_and_map_memory(ZPageAllocation* allocation, const ZMemoryRange& vmem, size_t committed_size);
101105
void free_memory_alloc_failed(ZPageAllocation* allocation);
102106

103107
void satisfy_stalled();

0 commit comments

Comments
 (0)