From 0a67ca44debd445adeb9aa35716ab11b96fc5e26 Mon Sep 17 00:00:00 2001
From: Albert Yang
Date: Thu, 17 Jul 2025 14:11:08 +0200
Subject: [PATCH] pgc-largepage

---
 .../share/gc/parallel/mutableNUMASpace.cpp    | 122 ++++++------------
 .../share/gc/parallel/mutableNUMASpace.hpp    |  13 +-
 .../share/gc/parallel/mutableSpace.cpp        |  45 +++----
 .../share/gc/parallel/mutableSpace.hpp        |  11 +-
 .../share/gc/parallel/objectStartArray.cpp    |   5 +-
 .../share/gc/parallel/parallelArguments.cpp   |  20 ++-
 .../gc/parallel/parallelScavengeHeap.cpp      |  26 +++-
 .../gc/parallel/parallelScavengeHeap.hpp      |   5 +
 src/hotspot/share/gc/parallel/psOldGen.cpp    |   2 +-
 .../share/gc/parallel/psVirtualspace.cpp      |   7 +-
 .../share/gc/parallel/psVirtualspace.hpp      |   4 +
 src/hotspot/share/gc/parallel/psYoungGen.cpp  |   8 +-
 .../gc/parallel/vmStructs_parallelgc.hpp      |   1 +
 src/hotspot/share/memory/universe.cpp         |  18 ++-
 src/hotspot/share/memory/universe.hpp         |   2 +-
 15 files changed, 140 insertions(+), 149 deletions(-)

diff --git a/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp b/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp
index d07a989c3a698..87c7e358f139e 100644
--- a/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp
+++ b/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp
@@ -24,6 +24,7 @@
 
 #include "gc/parallel/mutableNUMASpace.hpp"
 #include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/gcArguments.hpp"
 #include "gc/shared/gc_globals.hpp"
 #include "gc/shared/spaceDecorator.hpp"
 #include "gc/shared/workerThread.hpp"
@@ -37,21 +38,11 @@
 #include "runtime/threadSMR.hpp"
 #include "utilities/align.hpp"
 
-MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment), _must_use_large_pages(false) {
+MutableNUMASpace::MutableNUMASpace(size_t page_size) : MutableSpace(page_size) {
   _lgrp_spaces = new (mtGC) GrowableArray<LGRPSpace*>(0, mtGC);
-  _page_size = os::vm_page_size();
   _adaptation_cycles = 0;
   _samples_count = 0;
 
-#ifdef LINUX
-  // Changing the page size can lead to freeing of memory. When using large pages
-  // and the memory has been both reserved and committed, Linux does not support
-  // freeing parts of it.
-  if (UseLargePages && !os::can_commit_large_page_memory()) {
-    _must_use_large_pages = true;
-  }
-#endif // LINUX
-
   size_t lgrp_limit = os::numa_get_groups_num();
   uint *lgrp_ids = NEW_C_HEAP_ARRAY(uint, lgrp_limit, mtGC);
   size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
@@ -60,7 +51,7 @@ MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment),
   lgrp_spaces()->reserve(checked_cast<int>(lgrp_num));
   // Add new spaces for the new nodes
   for (size_t i = 0; i < lgrp_num; i++) {
-    lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i], alignment));
+    lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i], page_size));
   }
 
   FREE_C_HEAP_ARRAY(uint, lgrp_ids);
@@ -189,22 +180,19 @@ size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
 
 // Bias region towards the first-touching lgrp. Set the right page sizes.
 void MutableNUMASpace::bias_region(MemRegion mr, uint lgrp_id) {
-  HeapWord *start = align_up(mr.start(), page_size());
-  HeapWord *end = align_down(mr.end(), page_size());
-  if (end > start) {
-    MemRegion aligned_region(start, end);
-    assert((intptr_t)aligned_region.start()     % page_size() == 0 &&
-           (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
-    assert(region().contains(aligned_region), "Sanity");
-    // First we tell the OS which page size we want in the given range. The underlying
-    // large page can be broken down if we require small pages.
-    const size_t os_align = UseLargePages ? page_size() : os::vm_page_size();
-    os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), os_align);
-    // Then we uncommit the pages in the range.
-    os::disclaim_memory((char*)aligned_region.start(), aligned_region.byte_size());
-    // And make them local/first-touch biased.
-    os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), checked_cast<int>(lgrp_id));
-  }
+  assert(is_aligned(mr.start(), page_size()), "precondition");
+  assert(is_aligned(mr.end(), page_size()), "precondition");
+
+  if (mr.is_empty()) {
+    return;
+  }
+  // First we tell the OS which page size we want in the given range. The underlying
+  // large page can be broken down if we require small pages.
+  os::realign_memory((char*) mr.start(), mr.byte_size(), SpaceAlignment);
+  // Then we uncommit the pages in the range.
+  os::disclaim_memory((char*) mr.start(), mr.byte_size());
+  // And make them local/first-touch biased.
+  os::numa_make_local((char*)mr.start(), mr.byte_size(), checked_cast<int>(lgrp_id));
 }
 
 // Update space layout. Perform adaptation.
@@ -253,14 +241,15 @@ size_t MutableNUMASpace::current_chunk_size(int i) {
 
 // Return the default chunk size by equally diving the space.
 // page_size() aligned.
 size_t MutableNUMASpace::default_chunk_size() {
-  return base_space_size() / lgrp_spaces()->length() * page_size();
+  // The number of pages may not be evenly divided.
+  return align_down(capacity_in_bytes() / lgrp_spaces()->length(), page_size());
 }
 
 // Produce a new chunk size. page_size() aligned.
 // This function is expected to be called on sequence of i's from 0 to
 // lgrp_spaces()->length().
 size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) {
-  size_t pages_available = base_space_size();
+  size_t pages_available = capacity_in_bytes() / page_size();
   for (int j = 0; j < i; j++) {
     pages_available -= align_down(current_chunk_size(j), page_size()) / page_size();
   }
@@ -306,20 +295,13 @@ size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) {
 // |----bottom_region--|---intersection---|------top_region------|
 void MutableNUMASpace::select_tails(MemRegion new_region, MemRegion intersection,
                                     MemRegion* bottom_region, MemRegion *top_region) {
+  assert(is_aligned(new_region.start(), page_size()), "precondition");
+  assert(is_aligned(new_region.end(), page_size()), "precondition");
+  assert(is_aligned(intersection.start(), page_size()), "precondition");
+  assert(is_aligned(intersection.end(), page_size()), "precondition");
+
   // Is there bottom?
   if (new_region.start() < intersection.start()) { // Yes
-    // Try to coalesce small pages into a large one.
-    if (UseLargePages && page_size() >= alignment()) {
-      HeapWord* p = align_up(intersection.start(), alignment());
-      if (new_region.contains(p)
-          && pointer_delta(p, new_region.start(), sizeof(char)) >= alignment()) {
-        if (intersection.contains(p)) {
-          intersection = MemRegion(p, intersection.end());
-        } else {
-          intersection = MemRegion(p, p);
-        }
-      }
-    }
     *bottom_region = MemRegion(new_region.start(), intersection.start());
   } else {
     *bottom_region = MemRegion();
@@ -327,18 +309,6 @@ void MutableNUMASpace::select_tails(MemRegion new_region, MemRegion intersection,
 
   // Is there top?
   if (intersection.end() < new_region.end()) { // Yes
-    // Try to coalesce small pages into a large one.
-    if (UseLargePages && page_size() >= alignment()) {
-      HeapWord* p = align_down(intersection.end(), alignment());
-      if (new_region.contains(p)
-          && pointer_delta(new_region.end(), p, sizeof(char)) >= alignment()) {
-        if (intersection.contains(p)) {
-          intersection = MemRegion(intersection.start(), p);
-        } else {
-          intersection = MemRegion(p, p);
-        }
-      }
-    }
     *top_region = MemRegion(intersection.end(), new_region.end());
   } else {
     *top_region = MemRegion();
   }
 }
@@ -352,6 +322,8 @@ void MutableNUMASpace::initialize(MemRegion mr,
                                   WorkerThreads* pretouch_workers) {
   assert(clear_space, "Reallocation will destroy data!");
   assert(lgrp_spaces()->length() > 0, "There should be at least one space");
+  assert(is_aligned(mr.start(), page_size()), "precondition");
+  assert(is_aligned(mr.end(), page_size()), "precondition");
 
   MemRegion old_region = region(), new_region;
   set_bottom(mr.start());
@@ -359,37 +331,19 @@ void MutableNUMASpace::initialize(MemRegion mr,
 
   // Must always clear the space
   clear(SpaceDecorator::DontMangle);
 
-  // Compute chunk sizes
-  size_t prev_page_size = page_size();
-  set_page_size(alignment());
-  HeapWord* rounded_bottom = align_up(bottom(), page_size());
-  HeapWord* rounded_end = align_down(end(), page_size());
-  size_t base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
-
-  // Try small pages if the chunk size is too small
-  if (base_space_size_pages / lgrp_spaces()->length() == 0
-      && page_size() > os::vm_page_size()) {
-    // Changing the page size below can lead to freeing of memory. So we fail initialization.
-    if (_must_use_large_pages) {
-      vm_exit_during_initialization("Failed initializing NUMA with large pages. Too small heap size");
-    }
-    set_page_size(os::vm_page_size());
-    rounded_bottom = align_up(bottom(), page_size());
-    rounded_end = align_down(end(), page_size());
-    base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
+  size_t num_pages = mr.byte_size() / page_size();
+
+  if (num_pages < (size_t)lgrp_spaces()->length()) {
+    vm_exit_during_initialization(err_msg("Failed initializing NUMA, #pages-per-CPU is less than one: space-size: %zu, page-size: %zu, #CPU: %d",
+                                          mr.byte_size(), page_size(), lgrp_spaces()->length()));
   }
-  guarantee(base_space_size_pages / lgrp_spaces()->length() > 0, "Space too small");
-  set_base_space_size(base_space_size_pages);
 
   // Handle space resize
   MemRegion top_region, bottom_region;
   if (!old_region.equals(region())) {
-    new_region = MemRegion(rounded_bottom, rounded_end);
+    new_region = mr;
     MemRegion intersection = new_region.intersection(old_region);
-    if (intersection.start() == nullptr ||
-        intersection.end() == nullptr ||
-        prev_page_size > page_size()) { // If the page size got smaller we have to change
-                                        // the page size preference for the whole space.
+    if (intersection.is_empty()) {
       intersection = MemRegion(new_region.start(), new_region.start());
     }
     select_tails(new_region, intersection, &bottom_region, &top_region);
@@ -436,19 +390,20 @@ void MutableNUMASpace::initialize(MemRegion mr,
       if (i == 0) { // Bottom chunk
         if (i != lgrp_spaces()->length() - 1) {
-          new_region = MemRegion(bottom(), rounded_bottom + (chunk_byte_size >> LogHeapWordSize));
+          new_region = MemRegion(bottom(), chunk_byte_size >> LogHeapWordSize);
         } else {
           new_region = MemRegion(bottom(), end());
         }
-      } else
+      } else {
         if (i < lgrp_spaces()->length() - 1) { // Middle chunks
           MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
           new_region = MemRegion(ps->end(),
-                                 ps->end() + (chunk_byte_size >> LogHeapWordSize));
+                                 chunk_byte_size >> LogHeapWordSize);
         } else { // Top chunk
           MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
           new_region = MemRegion(ps->end(), end());
         }
+      }
 
       guarantee(region().contains(new_region), "Region invariant");
@@ -475,9 +430,8 @@ void MutableNUMASpace::initialize(MemRegion mr,
       // Clear space (set top = bottom) but never mangle.
       s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle, MutableSpace::DontSetupPages);
-
-    set_adaptation_cycles(samples_count());
   }
+  set_adaptation_cycles(samples_count());
 }
 
 // Set the top of the whole space.
diff --git a/src/hotspot/share/gc/parallel/mutableNUMASpace.hpp b/src/hotspot/share/gc/parallel/mutableNUMASpace.hpp
index abb4c77952af3..ed2195f011b0b 100644
--- a/src/hotspot/share/gc/parallel/mutableNUMASpace.hpp
+++ b/src/hotspot/share/gc/parallel/mutableNUMASpace.hpp
@@ -82,8 +82,8 @@ class MutableNUMASpace : public MutableSpace {
     SpaceStats _space_stats;
 
    public:
-    LGRPSpace(uint l, size_t alignment) : _lgrp_id(l), _allocation_failed(false) {
-      _space = new MutableSpace(alignment);
+    LGRPSpace(uint l, size_t page_size) : _lgrp_id(l), _allocation_failed(false) {
+      _space = new MutableSpace(page_size);
       _alloc_rate = new AdaptiveWeightedAverage(NUMAChunkResizeWeight);
     }
     ~LGRPSpace() {
@@ -119,14 +119,8 @@ class MutableNUMASpace : public MutableSpace {
   };
 
   GrowableArray<LGRPSpace*>* _lgrp_spaces;
-  size_t _page_size;
   unsigned _adaptation_cycles, _samples_count;
 
-  bool _must_use_large_pages;
-
-  void set_page_size(size_t psz) { _page_size = psz; }
-  size_t page_size() const { return _page_size; }
-
   unsigned adaptation_cycles() { return _adaptation_cycles; }
   void set_adaptation_cycles(int v) { _adaptation_cycles = v; }
@@ -135,7 +129,6 @@ class MutableNUMASpace : public MutableSpace {
 
   size_t _base_space_size;
   void set_base_space_size(size_t v) { _base_space_size = v; }
-  size_t base_space_size() const { return _base_space_size; }
 
   // Bias region towards the lgrp.
   void bias_region(MemRegion mr, uint lgrp_id);
@@ -156,7 +149,8 @@ class MutableNUMASpace : public MutableSpace {
 
  public:
   GrowableArray<LGRPSpace*>* lgrp_spaces() const { return _lgrp_spaces; }
-  MutableNUMASpace(size_t alignment);
+  MutableNUMASpace(size_t page_size);
   virtual ~MutableNUMASpace();
   // Space initialization.
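+  // mr must be page_size() aligned at both ends.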
   virtual void initialize(MemRegion mr,
diff --git a/src/hotspot/share/gc/parallel/mutableSpace.cpp b/src/hotspot/share/gc/parallel/mutableSpace.cpp
index 498fb12511c36..b1fa4e740d6ee 100644
--- a/src/hotspot/share/gc/parallel/mutableSpace.cpp
+++ b/src/hotspot/share/gc/parallel/mutableSpace.cpp
@@ -34,30 +34,26 @@
 #include "utilities/align.hpp"
 #include "utilities/macros.hpp"
 
-MutableSpace::MutableSpace(size_t alignment) :
+MutableSpace::MutableSpace(size_t page_size) :
   _last_setup_region(),
-  _alignment(alignment),
+  _page_size(page_size),
   _bottom(nullptr),
   _top(nullptr),
-  _end(nullptr)
-{
-  assert(MutableSpace::alignment() % os::vm_page_size() == 0,
-         "Space should be aligned");
-}
+  _end(nullptr) {}
 
-void MutableSpace::numa_setup_pages(MemRegion mr, size_t page_size, bool clear_space) {
-  if (!mr.is_empty()) {
-    HeapWord *start = align_up(mr.start(), page_size);
-    HeapWord *end = align_down(mr.end(), page_size);
-    if (end > start) {
-      size_t size = pointer_delta(end, start, sizeof(char));
-      if (clear_space) {
-        // Prefer page reallocation to migration.
-        os::disclaim_memory((char*)start, size);
-      }
-      os::numa_make_global((char*)start, size);
-    }
+void MutableSpace::numa_setup_pages(MemRegion mr, bool clear_space) {
+  assert(is_aligned(mr.start(), page_size()), "precondition");
+  assert(is_aligned(mr.end(), page_size()), "precondition");
+
+  if (mr.is_empty()) {
+    return;
   }
+
+  if (clear_space) {
+    // Prefer page reallocation to migration.
+    os::disclaim_memory((char*) mr.start(), mr.byte_size());
+  }
+  os::numa_make_global((char*) mr.start(), mr.byte_size());
 }
 
 void MutableSpace::initialize(MemRegion mr,
@@ -105,20 +101,17 @@ void MutableSpace::initialize(MemRegion mr,
 
   assert(mr.contains(head) && mr.contains(tail), "Sanity");
 
-  size_t page_size = alignment();
-
   if (UseNUMA) {
-    numa_setup_pages(head, page_size, clear_space);
-    numa_setup_pages(tail, page_size, clear_space);
+    numa_setup_pages(head, clear_space);
+    numa_setup_pages(tail, clear_space);
   }
 
   if (AlwaysPreTouch) {
-    size_t pretouch_page_size = UseLargePages ? page_size : os::vm_page_size();
     PretouchTask::pretouch("ParallelGC PreTouch head", (char*)head.start(),
                            (char*)head.end(),
-                           pretouch_page_size, pretouch_workers);
+                           page_size(), pretouch_workers);
     PretouchTask::pretouch("ParallelGC PreTouch tail", (char*)tail.start(),
                            (char*)tail.end(),
-                           pretouch_page_size, pretouch_workers);
+                           page_size(), pretouch_workers);
   }
 
   // Remember where we stopped so that we can continue later.
diff --git a/src/hotspot/share/gc/parallel/mutableSpace.hpp b/src/hotspot/share/gc/parallel/mutableSpace.hpp
index d09a2b2df89b7..d335920cc11cb 100644
--- a/src/hotspot/share/gc/parallel/mutableSpace.hpp
+++ b/src/hotspot/share/gc/parallel/mutableSpace.hpp
@@ -51,17 +51,20 @@ class MutableSpace: public CHeapObj<mtGC> {
 
   // The last region which page had been setup to be interleaved.
   MemRegion _last_setup_region;
-  size_t _alignment;
+  size_t _page_size;
   HeapWord* _bottom;
   HeapWord* volatile _top;
   HeapWord* _end;
 
-  void numa_setup_pages(MemRegion mr, size_t page_size, bool clear_space);
+  void numa_setup_pages(MemRegion mr, bool clear_space);
 
   void set_last_setup_region(MemRegion mr) { _last_setup_region = mr; }
   MemRegion last_setup_region() const { return _last_setup_region; }
 
- public:
+protected:
+  size_t page_size() const { return _page_size; }
+
+public:
   virtual ~MutableSpace() = default;
   MutableSpace(size_t page_size);
@@ -77,8 +80,6 @@ class MutableSpace: public CHeapObj<mtGC> {
   HeapWord* volatile* top_addr() { return &_top; }
   HeapWord** end_addr() { return &_end; }
 
-  size_t alignment() { return _alignment; }
-
   MemRegion region() const { return MemRegion(bottom(), end()); }
 
   size_t capacity_in_bytes() const { return capacity_in_words() * HeapWordSize; }
diff --git a/src/hotspot/share/gc/parallel/objectStartArray.cpp b/src/hotspot/share/gc/parallel/objectStartArray.cpp
index d120c71d2fa2c..255ee0c56ec42 100644
--- a/src/hotspot/share/gc/parallel/objectStartArray.cpp
+++ b/src/hotspot/share/gc/parallel/objectStartArray.cpp
@@ -47,7 +47,10 @@
 
   // Do not use large-pages for the backing store. The one large page region
   // will be used for the heap proper.
-  ReservedSpace backing_store = MemoryReserver::reserve(bytes_to_reserve, mtGC);
+  ReservedSpace backing_store = MemoryReserver::reserve(bytes_to_reserve,
+                                                        os::vm_allocation_granularity(),
+                                                        os::vm_page_size(),
+                                                        mtGC);
   if (!backing_store.is_reserved()) {
     vm_exit_during_initialization("Could not reserve space for ObjectStartArray");
   }
diff --git a/src/hotspot/share/gc/parallel/parallelArguments.cpp b/src/hotspot/share/gc/parallel/parallelArguments.cpp
index 780185952b4cb..5e0b1c15e8875 100644
--- a/src/hotspot/share/gc/parallel/parallelArguments.cpp
+++ b/src/hotspot/share/gc/parallel/parallelArguments.cpp
@@ -103,15 +103,10 @@ void ParallelArguments::initialize() {
   FullGCForwarding::initialize_flags(heap_reserved_size_bytes());
 }
 
-// The alignment used for spaces in young gen and old gen
-static size_t default_space_alignment() {
-  return 64 * K * HeapWordSize;
-}
-
 void ParallelArguments::initialize_alignments() {
   // Initialize card size before initializing alignments
   CardTable::initialize_card_size();
-  SpaceAlignment = default_space_alignment();
+  SpaceAlignment = ParallelScavengeHeap::default_space_alignment();
   HeapAlignment = compute_heap_alignment();
 }
 
@@ -123,12 +118,24 @@ void ParallelArguments::initialize_heap_flags_and_sizes_one_pass() {
 void ParallelArguments::initialize_heap_flags_and_sizes() {
   initialize_heap_flags_and_sizes_one_pass();
 
+  if (!UseLargePages) {
+    return;
+  }
+
+  // When using large pages, SpaceAlignment needs updating so that spaces are page-size aligned.
   const size_t min_pages = 4; // 1 for eden + 1 for each survivor + 1 for old
   const size_t page_sz = os::page_size_for_region_aligned(MinHeapSize, min_pages);
+  if (page_sz == os::vm_page_size()) {
+    log_warning(gc, heap)("MinHeapSize (%zu) must be large enough for 4 * page-size; disabling UseLargePages for heap", MinHeapSize);
+    return;
+  }
+
+  // Using large pages
   // Can a page size be something else than a power of two?
   assert(is_power_of_2((intptr_t)page_sz), "must be a power of 2");
-  size_t new_alignment = align_up(page_sz, SpaceAlignment);
+  // Space is large-page aligned.
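+  // (i.e., every space boundary then coincides with a large-page boundary)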
+  size_t new_alignment = page_sz;
   if (new_alignment != SpaceAlignment) {
     SpaceAlignment = new_alignment;
     // Redo everything from the start
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
index 2359ab9e1589e..5794d892557fc 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
@@ -64,7 +64,31 @@ GCPolicyCounters* ParallelScavengeHeap::_gc_policy_counters = nullptr;
 jint ParallelScavengeHeap::initialize() {
   const size_t reserved_heap_size = ParallelArguments::heap_reserved_size_bytes();
 
-  ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment);
+  // If using large pages, SpaceAlignment is the desired large-page size.
+  size_t desired_page_size = (SpaceAlignment == default_space_alignment())
+                             ? os::vm_page_size()
+                             : SpaceAlignment;
+  ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment, desired_page_size);
+  // Check if SpaceAlignment needs adjustment
+  if (UseLargePages) {
+    if (SpaceAlignment == default_space_alignment()) {
+      // Opted out of using large pages because MinHeapSize is too small.
+      assert(!is_aligned(SpaceAlignment, os::large_page_size()), "inv");
+      assert(heap_rs.page_size() == os::vm_page_size(), "inv");
+    } else {
+      // Opted in to using large pages
+      if (os::can_commit_large_page_memory()) {
+        // Keep SpaceAlignment as-is so that large pages can be formed
+      } else {
+        // Explicit large pages; use the actual page size or the default
+        SpaceAlignment = MAX2(heap_rs.page_size(), default_space_alignment());
+      }
+    }
+  } else {
+    assert(heap_rs.page_size() == os::vm_page_size(), "inv");
+    assert(SpaceAlignment == default_space_alignment(), "inv");
+  }
+  assert(is_aligned(SpaceAlignment, heap_rs.page_size()), "inv");
 
   trace_actual_reserved_page_size(reserved_heap_size, heap_rs);
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
index b0e804edb709d..e835aeb3dd883 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
@@ -131,6 +131,11 @@ class ParallelScavengeHeap : public CollectedHeap {
       _gc_overhead_counter(0),
       _is_heap_almost_full(false) {}
 
+  // The alignment used for spaces in young gen and old gen
+  static size_t default_space_alignment() {
+    return 64 * K * HeapWordSize;
+  }
+
   Name kind() const override {
     return CollectedHeap::Parallel;
   }
diff --git a/src/hotspot/share/gc/parallel/psOldGen.cpp b/src/hotspot/share/gc/parallel/psOldGen.cpp
index 89f22b72b6995..2d4b0698ad0c8 100644
--- a/src/hotspot/share/gc/parallel/psOldGen.cpp
+++ b/src/hotspot/share/gc/parallel/psOldGen.cpp
@@ -96,7 +96,7 @@ void PSOldGen::initialize_work() {
   //
   // ObjectSpace stuff
   //
-  _object_space = new MutableSpace(virtual_space()->alignment());
+  _object_space = new MutableSpace(virtual_space()->page_size());
   object_space()->initialize(committed_mr,
                              SpaceDecorator::Clear,
                              SpaceDecorator::Mangle,
diff --git a/src/hotspot/share/gc/parallel/psVirtualspace.cpp b/src/hotspot/share/gc/parallel/psVirtualspace.cpp
index 3be90b370d186..f4b24fa51af77 100644
--- a/src/hotspot/share/gc/parallel/psVirtualspace.cpp
+++ b/src/hotspot/share/gc/parallel/psVirtualspace.cpp
@@ -29,8 +29,8 @@
 #include "utilities/align.hpp"
 
 PSVirtualSpace::PSVirtualSpace(ReservedSpace rs, size_t alignment) :
-  _alignment(alignment)
-{
+  _alignment(alignment),
+  _page_size(rs.page_size()) {
   set_reserved(rs);
   set_committed(reserved_low_addr(), reserved_low_addr());
   DEBUG_ONLY(verify());
@@ -88,7 +88,8 @@ bool PSVirtualSpace::shrink_by(size_t bytes) {
 
 #ifndef PRODUCT
 void PSVirtualSpace::verify() const {
-  assert(is_aligned(_alignment, os::vm_page_size()), "bad alignment");
+  assert(is_aligned(_page_size, os::vm_page_size()), "bad alignment");
+  assert(is_aligned(_alignment, _page_size), "inv");
   assert(is_aligned(reserved_low_addr(), _alignment), "bad reserved_low_addr");
   assert(is_aligned(reserved_high_addr(), _alignment), "bad reserved_high_addr");
   assert(is_aligned(committed_low_addr(), _alignment), "bad committed_low_addr");
diff --git a/src/hotspot/share/gc/parallel/psVirtualspace.hpp b/src/hotspot/share/gc/parallel/psVirtualspace.hpp
index a54a513a11753..98ef230f0daaa 100644
--- a/src/hotspot/share/gc/parallel/psVirtualspace.hpp
+++ b/src/hotspot/share/gc/parallel/psVirtualspace.hpp
@@ -41,6 +41,9 @@ class PSVirtualSpace : public CHeapObj<mtGC> {
   // ReservedSpace passed to initialize() must be aligned to this value.
   const size_t _alignment;
 
+  // OS page size used. If using transparent large pages, it's the ordinary page-size.
+  const size_t _page_size;
+
   // Reserved area
   char* _reserved_low_addr;
   char* _reserved_high_addr;
@@ -68,6 +71,7 @@ class PSVirtualSpace : public CHeapObj<mtGC> {
 
   // Accessors (all sizes are bytes).
   size_t alignment() const { return _alignment; }
+  size_t page_size() const { return _page_size; }
   char* reserved_low_addr() const { return _reserved_low_addr; }
   char* reserved_high_addr() const { return _reserved_high_addr; }
   char* committed_low_addr() const { return _committed_low_addr; }
diff --git a/src/hotspot/share/gc/parallel/psYoungGen.cpp b/src/hotspot/share/gc/parallel/psYoungGen.cpp
index 21d023f8207e5..9aafdf16ef7f1 100644
--- a/src/hotspot/share/gc/parallel/psYoungGen.cpp
+++ b/src/hotspot/share/gc/parallel/psYoungGen.cpp
@@ -83,12 +83,12 @@ void PSYoungGen::initialize_work() {
   }
 
   if (UseNUMA) {
-    _eden_space = new MutableNUMASpace(virtual_space()->alignment());
+    _eden_space = new MutableNUMASpace(virtual_space()->page_size());
   } else {
-    _eden_space = new MutableSpace(virtual_space()->alignment());
+    _eden_space = new MutableSpace(virtual_space()->page_size());
   }
-  _from_space = new MutableSpace(virtual_space()->alignment());
-  _to_space   = new MutableSpace(virtual_space()->alignment());
+  _from_space = new MutableSpace(virtual_space()->page_size());
+  _to_space   = new MutableSpace(virtual_space()->page_size());
 
   // Generation Counters - generation 0, 3 subspaces
   _gen_counters = new GenerationCounters("new", 0, 3, min_gen_size(),
diff --git a/src/hotspot/share/gc/parallel/vmStructs_parallelgc.hpp b/src/hotspot/share/gc/parallel/vmStructs_parallelgc.hpp
index fa019aa5b4295..f5e7375fca1dd 100644
--- a/src/hotspot/share/gc/parallel/vmStructs_parallelgc.hpp
+++ b/src/hotspot/share/gc/parallel/vmStructs_parallelgc.hpp
@@ -40,6 +40,7 @@
   /* Parallel GC fields */                                                \
   /**********************/                                                \
   nonstatic_field(PSVirtualSpace,       _alignment,          const size_t) \
+  nonstatic_field(PSVirtualSpace,       _page_size,          const size_t) \
   nonstatic_field(PSVirtualSpace,       _reserved_low_addr,  char*)        \
   nonstatic_field(PSVirtualSpace,       _reserved_high_addr, char*)        \
   nonstatic_field(PSVirtualSpace,       _committed_low_addr, char*)        \
diff --git a/src/hotspot/share/memory/universe.cpp b/src/hotspot/share/memory/universe.cpp
index bd47a054bc021..734fed27065b9 100644
--- a/src/hotspot/share/memory/universe.cpp
+++ b/src/hotspot/share/memory/universe.cpp
@@ -947,7 +947,7 @@ void Universe::initialize_tlab() {
   }
 }
 
-ReservedHeapSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
+ReservedHeapSpace Universe::reserve_heap(size_t heap_size, size_t alignment, size_t desired_page_size) {
   assert(alignment <= Arguments::conservative_max_heap_alignment(),
          "actual alignment %zu must be within maximum heap alignment %zu",
@@ -958,15 +958,21 @@ ReservedHeapSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
   assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
          "heap size is too big for compressed oops");
 
-  size_t page_size = os::vm_page_size();
-  if (UseLargePages && is_aligned(alignment, os::large_page_size())) {
-    page_size = os::large_page_size();
+  size_t page_size;
+  if (desired_page_size == 0) {
+    if (UseLargePages) {
+      page_size = os::large_page_size();
+    } else {
+      page_size = os::vm_page_size();
+    }
   } else {
     // Parallel is the only collector that might opt out of using large pages
     // for the heap.
-    assert(!UseLargePages || UseParallelGC , "Wrong alignment to use large pages");
+    assert(UseParallelGC, "only Parallel");
+    // Use the caller-provided value.
+    page_size = desired_page_size;
   }
-
+  assert(is_aligned(heap_size, page_size), "inv");
   // Now create the space.
   ReservedHeapSpace rhs = HeapReserver::reserve(total_reserved, alignment, page_size, AllocateHeapAt);
diff --git a/src/hotspot/share/memory/universe.hpp b/src/hotspot/share/memory/universe.hpp
index 35c31330f082e..80b7224fb750b 100644
--- a/src/hotspot/share/memory/universe.hpp
+++ b/src/hotspot/share/memory/universe.hpp
@@ -301,7 +301,7 @@ class Universe: AllStatic {
   DEBUG_ONLY(static bool is_in_heap_or_null(const void* p) { return p == nullptr || is_in_heap(p); })
 
   // Reserve Java heap and determine CompressedOops mode
-  static ReservedHeapSpace reserve_heap(size_t heap_size, size_t alignment);
+  static ReservedHeapSpace reserve_heap(size_t heap_size, size_t alignment, size_t desired_page_size = 0);
 
   // Global OopStorages
   static OopStorage* vm_weak();