3737#include " utilities/defaultStream.hpp"
3838#include " utilities/powerOfTwo.hpp"
3939
40- size_t ParallelArguments::conservative_max_heap_alignment () {
41- // The card marking array and the offset arrays for old generations are
42- // committed in os pages as well. Make sure they are entirely full (to
43- // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
44- // byte entry and the os page size is 4096, the maximum heap size should
45- // be 512*4096 = 2MB aligned.
40+ static size_t num_young_spaces () {
41+ // When using NUMA, we create one MutableNUMASpace for each NUMA node
42+ const size_t num_eden_spaces = UseNUMA ? os::numa_get_groups_num () : 1 ;
4643
47- size_t alignment = CardTable::ct_max_alignment_constraint ();
44+ // The young generation must have room for eden + two survivors
45+ return num_eden_spaces + 2 ;
46+ }
47+
48+ static size_t num_old_spaces () {
49+ return 1 ;
50+ }
51+
52+ void ParallelArguments::initialize_alignments () {
53+ // Initialize card size before initializing alignments
54+ CardTable::initialize_card_size ();
55+ const size_t card_table_alignment = CardTable::ct_max_alignment_constraint ();
56+ SpaceAlignment = ParallelScavengeHeap::default_space_alignment ();
4857
4958 if (UseLargePages) {
50- // In presence of large pages we have to make sure that our
51- // alignment is large page aware.
52- alignment = lcm (os::large_page_size (), alignment);
53- }
59+ const size_t total_spaces = num_young_spaces () + num_old_spaces ();
60+ const size_t page_size = os::page_size_for_region_unaligned (MaxHeapSize, total_spaces);
61+ ParallelScavengeHeap::set_desired_page_size (page_size);
5462
55- return alignment;
63+ if (page_size == os::vm_page_size ()) {
64+ log_warning (gc, heap)(" MaxHeapSize (%zu) must be large enough for %zu * page-size; Disabling UseLargePages for heap" ,
65+ MaxHeapSize, total_spaces);
66+ }
67+
68+ if (page_size > SpaceAlignment) {
69+ SpaceAlignment = page_size;
70+ }
71+
72+ HeapAlignment = lcm (page_size, card_table_alignment);
73+
74+ } else {
75+ assert (is_aligned (SpaceAlignment, os::vm_page_size ()), " " );
76+ ParallelScavengeHeap::set_desired_page_size (os::vm_page_size ());
77+ HeapAlignment = card_table_alignment;
78+ }
5679}
5780
5881void ParallelArguments::initialize () {
@@ -112,45 +135,26 @@ void ParallelArguments::initialize() {
   FullGCForwarding::initialize_flags(heap_reserved_size_bytes());
 }
 
-static size_t num_young_spaces() {
-  // When using NUMA, we create one MutableNUMASpace for each NUMA node
-  const size_t num_eden_spaces = UseNUMA ? os::numa_get_groups_num() : 1;
-
-  // The young generation must have room for eden + two survivors
-  return num_eden_spaces + 2;
-}
-
-static size_t num_old_spaces() {
-  return 1;
-}
+size_t ParallelArguments::conservative_max_heap_alignment() {
+  // The card marking array and the offset arrays for old generations are
+  // committed in os pages as well. Make sure they are entirely full (to
+  // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
+  // byte entry and the os page size is 4096, the maximum heap size should
+  // be 512*4096 = 2MB aligned.
 
-void ParallelArguments::initialize_alignments() {
-  // Initialize card size before initializing alignments
-  CardTable::initialize_card_size();
-  const size_t card_table_alignment = CardTable::ct_max_alignment_constraint();
-  SpaceAlignment = ParallelScavengeHeap::default_space_alignment();
+  size_t alignment = CardTable::ct_max_alignment_constraint();
 
   if (UseLargePages) {
-    const size_t total_spaces = num_young_spaces() + num_old_spaces();
-    const size_t page_size = os::page_size_for_region_unaligned(MaxHeapSize, total_spaces);
-    ParallelScavengeHeap::set_desired_page_size(page_size);
-
-    if (page_size == os::vm_page_size()) {
-      log_warning(gc, heap)("MaxHeapSize (%zu) must be large enough for %zu * page-size; Disabling UseLargePages for heap",
-                            MaxHeapSize, total_spaces);
-    }
-
-    if (page_size > SpaceAlignment) {
-      SpaceAlignment = page_size;
-    }
+    // In presence of large pages we have to make sure that our
+    // alignment is large page aware.
+    alignment = lcm(os::large_page_size(), alignment);
+  }
 
-    HeapAlignment = lcm(page_size, card_table_alignment);
+  return alignment;
+}
 
-  } else {
-    assert(is_aligned(SpaceAlignment, os::vm_page_size()), "");
-    ParallelScavengeHeap::set_desired_page_size(os::vm_page_size());
-    HeapAlignment = card_table_alignment;
-  }
+CollectedHeap* ParallelArguments::create_heap() {
+  return new ParallelScavengeHeap();
 }
 
 size_t ParallelArguments::young_gen_size_lower_bound() {
@@ -164,7 +168,3 @@ size_t ParallelArguments::old_gen_size_lower_bound() {
 size_t ParallelArguments::heap_reserved_size_bytes() {
   return MaxHeapSize;
 }
-
-CollectedHeap* ParallelArguments::create_heap() {
-  return new ParallelScavengeHeap();
-}
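
For context, here is a minimal standalone sketch (not part of the patch) of the alignment arithmetic used by the `initialize_alignments()` shown above: with `UseLargePages`, `HeapAlignment` becomes `lcm(page_size, card_table_alignment)`; otherwise it is just the card-table constraint. The page size and the `512 * 4096` card-table constraint below are assumed example values (the real code queries `os` and `CardTable`), and `std::lcm` stands in for HotSpot's `lcm()` helper.

```cpp
// Illustrative sketch only, under the assumptions stated above.
#include <cstddef>
#include <cstdio>
#include <numeric>  // std::lcm (C++17)

int main() {
  const std::size_t large_page_size      = 1024 * 1024 * 1024;  // assumed 1 GiB large pages
  const std::size_t card_table_alignment = 512 * 4096;          // 2 MiB, per the comment in
                                                                 // conservative_max_heap_alignment()

  // Large-page case: HeapAlignment = lcm(page_size, card_table_alignment).
  // Both inputs are powers of two here, so the lcm is simply the larger one.
  const std::size_t heap_alignment_lp = std::lcm(large_page_size, card_table_alignment);

  // Small-page case: HeapAlignment is the card-table constraint alone.
  const std::size_t heap_alignment_sp = card_table_alignment;

  std::printf("HeapAlignment with large pages: %zu\n", heap_alignment_lp);  // 1073741824
  std::printf("HeapAlignment with small pages: %zu\n", heap_alignment_sp);  // 2097152
  return 0;
}
```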