@@ -227,11 +227,6 @@ void CodeCache::initialize_heaps() {

   if (!non_nmethod.set) {
     non_nmethod.size += compiler_buffer_size;
-    // Further down, just before FLAG_SET_ERGO(), all segment sizes are
-    // aligned down to the next lower multiple of min_size. For large page
-    // sizes, this may result in (non_nmethod.size == 0) which is not acceptable.
-    // Therefore, force non_nmethod.size to at least min_size.
-    non_nmethod.size = MAX2(non_nmethod.size, min_size);
   }

   if (!profiled.set && !non_profiled.set) {
@@ -307,11 +302,10 @@ void CodeCache::initialize_heaps() {

   // Note: if large page support is enabled, min_size is at least the large
   // page size. This ensures that the code cache is covered by large pages.
-  non_profiled.size += non_nmethod.size & alignment_mask(min_size);
-  non_profiled.size += profiled.size & alignment_mask(min_size);
-  non_nmethod.size = align_down(non_nmethod.size, min_size);
-  profiled.size = align_down(profiled.size, min_size);
-  non_profiled.size = align_down(non_profiled.size, min_size);
+  non_nmethod.size = align_up(non_nmethod.size, min_size);
+  profiled.size = align_up(profiled.size, min_size);
+  non_profiled.size = align_up(non_profiled.size, min_size);
+  cache_size = non_nmethod.size + profiled.size + non_profiled.size;

  FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
  FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
0 commit comments