Skip to content

Commit d3a008f

Browse files
committed
8212084: G1: Implement UseGCOverheadLimit
Reviewed-by: phh, rrich
Backport-of: 3d2ce80
1 parent 101e8ed commit d3a008f

File tree

4 files changed

+186
-16
lines changed

4 files changed

+186
-16
lines changed

src/hotspot/share/gc/g1/g1CollectedHeap.cpp

Lines changed: 76 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,7 @@
2828
#include "code/codeCache.hpp"
2929
#include "compiler/oopMap.hpp"
3030
#include "gc/g1/g1Allocator.inline.hpp"
31+
#include "gc/g1/g1Analytics.hpp"
3132
#include "gc/g1/g1Arguments.hpp"
3233
#include "gc/g1/g1BarrierSet.hpp"
3334
#include "gc/g1/g1BatchedTask.hpp"
@@ -452,8 +453,15 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(uint node_index, size_t word_
452453
log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating %zu words",
453454
Thread::current()->name(), word_size);
454455

456+
// Has the gc overhead limit been reached in the meantime? If so, this mutator
457+
// should also receive null after unsuccessfully scheduling a collection, to keep
458+
// behavior globally consistent.
459+
if (gc_overhead_limit_exceeded()) {
460+
return nullptr;
461+
}
462+
455463
// We can reach here if we were unsuccessful in scheduling a collection (because
456-
// another thread beat us to it). In this case immeditealy retry the allocation
464+
// another thread beat us to it). In this case immediately retry the allocation
457465
// attempt because another thread successfully performed a collection and possibly
458466
// reclaimed enough space. The first attempt (without holding the Heap_lock) is
459467
// here and the follow-on attempt will be at the start of the next loop
@@ -695,6 +703,13 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
695703
log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating %zu",
696704
Thread::current()->name(), word_size);
697705

706+
// Has the gc overhead limit been reached in the meantime? If so, this mutator
707+
// should also receive null after unsuccessfully scheduling a collection, to keep
708+
// behavior globally consistent.
709+
if (gc_overhead_limit_exceeded()) {
710+
return nullptr;
711+
}
712+
698713
// We can reach here if we were unsuccessful in scheduling a collection (because
699714
// another thread beat us to it).
700715
// Humongous object allocation always needs a lock, so we wait for the retry
@@ -897,25 +912,62 @@ void G1CollectedHeap::resize_heap_if_necessary(size_t allocation_word_size) {
897912
}
898913
}
899914

915+
void G1CollectedHeap::update_gc_overhead_counter() {
916+
assert(SafepointSynchronize::is_at_safepoint(), "precondition");
917+
918+
if (!UseGCOverheadLimit) {
919+
return;
920+
}
921+
922+
bool gc_time_over_limit = (_policy->analytics()->long_term_pause_time_ratio() * 100) >= GCTimeLimit;
923+
double free_space_percent = percent_of(num_available_regions() * G1HeapRegion::GrainBytes, max_capacity());
924+
bool free_space_below_limit = free_space_percent < GCHeapFreeLimit;
925+
926+
log_debug(gc)("GC Overhead Limit: GC Time %f Free Space %f Counter %zu",
927+
(_policy->analytics()->long_term_pause_time_ratio() * 100),
928+
free_space_percent,
929+
_gc_overhead_counter);
930+
931+
if (gc_time_over_limit && free_space_below_limit) {
932+
_gc_overhead_counter++;
933+
} else {
934+
_gc_overhead_counter = 0;
935+
}
936+
}
937+
938+
bool G1CollectedHeap::gc_overhead_limit_exceeded() {
939+
return _gc_overhead_counter >= GCOverheadLimitThreshold;
940+
}
941+
900942
HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
901943
bool do_gc,
902944
bool maximal_compaction,
903945
bool expect_null_mutator_alloc_region) {
904-
// Let's attempt the allocation first.
905-
HeapWord* result =
906-
attempt_allocation_at_safepoint(word_size,
907-
expect_null_mutator_alloc_region);
908-
if (result != nullptr) {
909-
return result;
910-
}
946+
// Skip allocation if GC overhead limit has been exceeded to let the mutator run
947+
// into an OOME. It can either exit "gracefully" or try to free up memory asap.
948+
// For the latter situation, keep running GCs. If the mutator frees up enough
949+
// memory quickly enough, the overhead(s) will go below the threshold(s) again
950+
// and the VM may continue running.
951+
// If we did not continue garbage collections, the (gc overhead) limit may decrease
952+
// enough by itself to not count as exceeding the limit any more, in the worst
953+
// case bouncing back-and-forth all the time.
954+
if (!gc_overhead_limit_exceeded()) {
955+
// Let's attempt the allocation first.
956+
HeapWord* result =
957+
attempt_allocation_at_safepoint(word_size,
958+
expect_null_mutator_alloc_region);
959+
if (result != nullptr) {
960+
return result;
961+
}
911962

912-
// In a G1 heap, we're supposed to keep allocation from failing by
913-
// incremental pauses. Therefore, at least for now, we'll favor
914-
// expansion over collection. (This might change in the future if we can
915-
// do something smarter than full collection to satisfy a failed alloc.)
916-
result = expand_and_allocate(word_size);
917-
if (result != nullptr) {
918-
return result;
963+
// In a G1 heap, we're supposed to keep allocation from failing by
964+
// incremental pauses. Therefore, at least for now, we'll favor
965+
// expansion over collection. (This might change in the future if we can
966+
// do something smarter than full collection to satisfy a failed alloc.)
967+
result = expand_and_allocate(word_size);
968+
if (result != nullptr) {
969+
return result;
970+
}
919971
}
920972

921973
if (do_gc) {
@@ -939,6 +991,10 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
939991
HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size) {
940992
assert_at_safepoint_on_vm_thread();
941993

994+
// Update GC overhead limits after the initial garbage collection leading to this
995+
// allocation attempt.
996+
update_gc_overhead_counter();
997+
942998
// Attempts to allocate followed by Full GC.
943999
HeapWord* result =
9441000
satisfy_failed_allocation_helper(word_size,
@@ -973,6 +1029,10 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size) {
9731029
assert(!soft_ref_policy()->should_clear_all_soft_refs(),
9741030
"Flag should have been handled and cleared prior to this point");
9751031

1032+
if (gc_overhead_limit_exceeded()) {
1033+
log_info(gc)("GC Overhead Limit exceeded too often (%zu).", GCOverheadLimitThreshold);
1034+
}
1035+
9761036
// What else? We might try synchronous finalization later. If the total
9771037
// space available is large enough for the allocation, then a more
9781038
// complete compaction phase than we've tried so far might be
@@ -1138,6 +1198,7 @@ class HumongousRegionSetChecker : public G1HeapRegionSetChecker {
11381198

11391199
G1CollectedHeap::G1CollectedHeap() :
11401200
CollectedHeap(),
1201+
_gc_overhead_counter(0),
11411202
_service_thread(nullptr),
11421203
_periodic_gc_task(nullptr),
11431204
_free_arena_memory_task(nullptr),

src/hotspot/share/gc/g1/g1CollectedHeap.hpp

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -169,6 +169,17 @@ class G1CollectedHeap : public CollectedHeap {
169169
friend class G1CheckRegionAttrTableClosure;
170170

171171
private:
172+
// GC Overhead Limit functionality related members.
173+
//
174+
// The goal is to return null for allocations prematurely (before actually
175+
// throwing OOME) when both GC CPU usage is high (>= GCTimeLimit) and little
176+
// free memory is available (<= GCHeapFreeLimit), so that applications can exit
177+
// gracefully or try to keep running by easing off memory.
178+
uintx _gc_overhead_counter; // The number of consecutive garbage collections we were over the limits.
179+
180+
void update_gc_overhead_counter();
181+
bool gc_overhead_limit_exceeded();
182+
172183
G1ServiceThread* _service_thread;
173184
G1ServiceTask* _periodic_gc_task;
174185
G1MonotonicArenaFreeMemoryTask* _free_arena_memory_task;

src/hotspot/share/gc/shared/gc_globals.hpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -414,7 +414,7 @@
414414
"Initial ratio of young generation/survivor space size") \
415415
range(3, max_uintx) \
416416
\
417-
product(bool, UseGCOverheadLimit, true, \
417+
product(bool, UseGCOverheadLimit, falseInDebug, \
418418
"Use policy to limit of proportion of time spent in GC " \
419419
"before an OutOfMemory error is thrown") \
420420
\
Lines changed: 98 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,98 @@
1+
/*
2+
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
3+
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4+
*
5+
* This code is free software; you can redistribute it and/or modify it
6+
* under the terms of the GNU General Public License version 2 only, as
7+
* published by the Free Software Foundation.
8+
*
9+
* This code is distributed in the hope that it will be useful, but WITHOUT
10+
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11+
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12+
* version 2 for more details (a copy is included in the LICENSE file that
13+
* accompanied this code).
14+
*
15+
* You should have received a copy of the GNU General Public License version
16+
* 2 along with this work; if not, write to the Free Software Foundation,
17+
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18+
*
19+
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20+
* or visit www.oracle.com if you need additional information or have any
21+
* questions.
22+
*/
23+
24+
package gc;
25+
26+
/*
27+
* @test id=Parallel
28+
* @requires vm.gc.Parallel & false
29+
* @requires !vm.debug
30+
* @summary Verifies that the UseGCOverheadLimit functionality works in Parallel GC.
31+
* @library /test/lib
32+
* @run driver gc.TestUseGCOverheadLimit Parallel
33+
*/
34+
35+
/*
36+
* @test id=G1
37+
* @requires vm.gc.G1
38+
* @requires !vm.debug
39+
* @summary Verifies that the UseGCOverheadLimit functionality works in G1 GC.
40+
* @library /test/lib
41+
* @run driver gc.TestUseGCOverheadLimit G1
42+
*/
43+
44+
import java.util.Arrays;
45+
import java.util.stream.Stream;
46+
47+
import jdk.test.lib.process.OutputAnalyzer;
48+
import jdk.test.lib.process.ProcessTools;
49+
50+
public class TestUseGCOverheadLimit {
51+
public static void main(String args[]) throws Exception {
52+
String[] parallelArgs = {
53+
"-XX:+UseParallelGC",
54+
"-XX:NewSize=122m",
55+
"-XX:SurvivorRatio=99",
56+
"-XX:GCHeapFreeLimit=10"
57+
};
58+
String[] g1Args = {
59+
"-XX:+UseG1GC",
60+
"-XX:GCHeapFreeLimit=5"
61+
};
62+
63+
String[] selectedArgs = args[0].equals("G1") ? g1Args : parallelArgs;
64+
65+
final String[] commonArgs = {
66+
"-XX:-UseCompactObjectHeaders", // Object sizes are calculated such that the heap is tight.
67+
"-XX:ParallelGCThreads=1", // Make GCs take longer.
68+
"-XX:+UseGCOverheadLimit",
69+
"-Xlog:gc=debug",
70+
"-XX:GCTimeLimit=90", // Ease the CPU requirement a little.
71+
"-Xmx128m",
72+
Allocating.class.getName()
73+
};
74+
75+
String[] vmArgs = Stream.concat(Arrays.stream(selectedArgs), Arrays.stream(commonArgs)).toArray(String[]::new);
76+
OutputAnalyzer output = ProcessTools.executeLimitedTestJava(vmArgs);
77+
output.shouldNotHaveExitValue(0);
78+
79+
System.out.println(output.getStdout());
80+
81+
output.stdoutShouldContain("GC Overhead Limit exceeded too often (5).");
82+
}
83+
84+
static class Allocating {
85+
public static void main(String[] args) {
86+
Object[] cache = new Object[1024 * 1024 * 2];
87+
88+
// Allocate random objects, keeping around data, causing garbage
89+
// collections.
90+
for (int i = 0; i < 1024* 1024 * 30; i++) {
91+
Object[] obj = new Object[10];
92+
cache[i % cache.length] = obj;
93+
}
94+
95+
System.out.println(cache);
96+
}
97+
}
98+
}

0 commit comments

Comments
 (0)