Skip to content

Commit 28ed52d

Browse files
author
Thomas Schatzl
committed
8350621: Code cache stops scheduling GC
Reviewed-by: shade
Backport-of: 2292246f8c11f735f50e2046ec6606e89289e9f5
1 parent a61a852 commit 28ed52d

File tree

8 files changed

+348
-52
lines changed

8 files changed

+348
-52
lines changed

src/hotspot/share/gc/g1/g1CollectedHeap.cpp

Lines changed: 103 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -1690,6 +1690,66 @@ static bool gc_counter_less_than(uint x, uint y) {
16901690
#define LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, result) \
16911691
LOG_COLLECT_CONCURRENTLY(cause, "complete %s", BOOL_TO_STR(result))
16921692

1693+
bool G1CollectedHeap::wait_full_mark_finished(GCCause::Cause cause,
1694+
uint old_marking_started_before,
1695+
uint old_marking_started_after,
1696+
uint old_marking_completed_after) {
1697+
// Request is finished if a full collection (concurrent or stw)
1698+
// was started after this request and has completed, e.g.
1699+
// started_before < completed_after.
1700+
if (gc_counter_less_than(old_marking_started_before,
1701+
old_marking_completed_after)) {
1702+
LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, true);
1703+
return true;
1704+
}
1705+
1706+
if (old_marking_started_after != old_marking_completed_after) {
1707+
// If there is an in-progress cycle (possibly started by us), then
1708+
// wait for that cycle to complete, e.g.
1709+
// while completed_now < started_after.
1710+
LOG_COLLECT_CONCURRENTLY(cause, "wait");
1711+
MonitorLocker ml(G1OldGCCount_lock);
1712+
while (gc_counter_less_than(_old_marking_cycles_completed,
1713+
old_marking_started_after)) {
1714+
ml.wait();
1715+
}
1716+
// Request is finished if the collection we just waited for was
1717+
// started after this request.
1718+
if (old_marking_started_before != old_marking_started_after) {
1719+
LOG_COLLECT_CONCURRENTLY(cause, "complete after wait");
1720+
return true;
1721+
}
1722+
}
1723+
return false;
1724+
}
1725+
1726+
// After calling wait_full_mark_finished(), this method determines whether we
1727+
// previously failed for ordinary reasons (concurrent cycle in progress, whitebox
1728+
// has control). Returns whether this was such an ordinary reason.
1729+
static bool should_retry_vm_op(GCCause::Cause cause,
1730+
VM_G1TryInitiateConcMark* op) {
1731+
if (op->cycle_already_in_progress()) {
1732+
// If VMOp failed because a cycle was already in progress, it
1733+
// is now complete. But it didn't finish this user-requested
1734+
// GC, so try again.
1735+
LOG_COLLECT_CONCURRENTLY(cause, "retry after in-progress");
1736+
return true;
1737+
} else if (op->whitebox_attached()) {
1738+
// If WhiteBox wants control, wait for notification of a state
1739+
// change in the controller, then try again. Don't wait for
1740+
// release of control, since collections may complete while in
1741+
// control. Note: This won't recognize a STW full collection
1742+
// while waiting; we can't wait on multiple monitors.
1743+
LOG_COLLECT_CONCURRENTLY(cause, "whitebox control stall");
1744+
MonitorLocker ml(ConcurrentGCBreakpoints::monitor());
1745+
if (ConcurrentGCBreakpoints::is_controlled()) {
1746+
ml.wait();
1747+
}
1748+
return true;
1749+
}
1750+
return false;
1751+
}
1752+
16931753
bool G1CollectedHeap::try_collect_concurrently(GCCause::Cause cause,
16941754
uint gc_counter,
16951755
uint old_marking_started_before) {
@@ -1750,7 +1810,45 @@ bool G1CollectedHeap::try_collect_concurrently(GCCause::Cause cause,
17501810
LOG_COLLECT_CONCURRENTLY(cause, "ignoring STW full GC");
17511811
old_marking_started_before = old_marking_started_after;
17521812
}
1813+
} else if (GCCause::is_codecache_requested_gc(cause)) {
1814+
// For a CodeCache requested GC, before marking, progress is ensured as the
1815+
// following Remark pause unloads code (and signals the requester such).
1816+
// Otherwise we must ensure that it is restarted.
1817+
//
1818+
// For a CodeCache requested GC, a successful GC operation means that
1819+
// (1) marking is in progress. I.e. the VMOp started the marking or a
1820+
// Remark pause is pending from a different VM op; we will potentially
1821+
// abort a mixed phase if needed.
1822+
// (2) a new cycle was started (by this thread or some other), or
1823+
// (3) a Full GC was performed.
1824+
//
1825+
// Cases (2) and (3) are detected together by a change to
1826+
// _old_marking_cycles_started.
1827+
//
1828+
// Compared to other "automatic" GCs (see below), we do not consider being
1829+
// in whitebox as sufficient too because we might be anywhere within that
1830+
// cycle and we need to make progress.
1831+
if (op.mark_in_progress() ||
1832+
(old_marking_started_before != old_marking_started_after)) {
1833+
LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, true);
1834+
return true;
1835+
}
1836+
1837+
if (wait_full_mark_finished(cause,
1838+
old_marking_started_before,
1839+
old_marking_started_after,
1840+
old_marking_completed_after)) {
1841+
return true;
1842+
}
1843+
1844+
if (should_retry_vm_op(cause, &op)) {
1845+
continue;
1846+
}
17531847
} else if (!GCCause::is_user_requested_gc(cause)) {
1848+
assert(cause == GCCause::_g1_humongous_allocation ||
1849+
cause == GCCause::_g1_periodic_collection,
1850+
"Unsupported cause %s", GCCause::to_string(cause));
1851+
17541852
// For an "automatic" (not user-requested) collection, we just need to
17551853
// ensure that progress is made.
17561854
//
@@ -1762,11 +1860,6 @@ bool G1CollectedHeap::try_collect_concurrently(GCCause::Cause cause,
17621860
// (5) a Full GC was performed.
17631861
// Cases (4) and (5) are detected together by a change to
17641862
// _old_marking_cycles_started.
1765-
//
1766-
// Note that (1) does not imply (4). If we're still in the mixed
1767-
// phase of an earlier concurrent collection, the request to make the
1768-
// collection a concurrent start won't be honored. If we don't check for
1769-
// both conditions we'll spin doing back-to-back collections.
17701863
if (op.gc_succeeded() ||
17711864
op.cycle_already_in_progress() ||
17721865
op.whitebox_attached() ||
@@ -1790,56 +1883,20 @@ bool G1CollectedHeap::try_collect_concurrently(GCCause::Cause cause,
17901883
BOOL_TO_STR(op.gc_succeeded()),
17911884
old_marking_started_before, old_marking_started_after);
17921885

1793-
// Request is finished if a full collection (concurrent or stw)
1794-
// was started after this request and has completed, e.g.
1795-
// started_before < completed_after.
1796-
if (gc_counter_less_than(old_marking_started_before,
1797-
old_marking_completed_after)) {
1798-
LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, true);
1886+
if (wait_full_mark_finished(cause,
1887+
old_marking_started_before,
1888+
old_marking_started_after,
1889+
old_marking_completed_after)) {
17991890
return true;
18001891
}
18011892

1802-
if (old_marking_started_after != old_marking_completed_after) {
1803-
// If there is an in-progress cycle (possibly started by us), then
1804-
// wait for that cycle to complete, e.g.
1805-
// while completed_now < started_after.
1806-
LOG_COLLECT_CONCURRENTLY(cause, "wait");
1807-
MonitorLocker ml(G1OldGCCount_lock);
1808-
while (gc_counter_less_than(_old_marking_cycles_completed,
1809-
old_marking_started_after)) {
1810-
ml.wait();
1811-
}
1812-
// Request is finished if the collection we just waited for was
1813-
// started after this request.
1814-
if (old_marking_started_before != old_marking_started_after) {
1815-
LOG_COLLECT_CONCURRENTLY(cause, "complete after wait");
1816-
return true;
1817-
}
1818-
}
1819-
18201893
// If VMOp was successful then it started a new cycle that the above
18211894
// wait &etc should have recognized as finishing this request. This
18221895
// differs from a non-user-request, where gc_succeeded does not imply
18231896
// a new cycle was started.
18241897
assert(!op.gc_succeeded(), "invariant");
18251898

1826-
if (op.cycle_already_in_progress()) {
1827-
// If VMOp failed because a cycle was already in progress, it
1828-
// is now complete. But it didn't finish this user-requested
1829-
// GC, so try again.
1830-
LOG_COLLECT_CONCURRENTLY(cause, "retry after in-progress");
1831-
continue;
1832-
} else if (op.whitebox_attached()) {
1833-
// If WhiteBox wants control, wait for notification of a state
1834-
// change in the controller, then try again. Don't wait for
1835-
// release of control, since collections may complete while in
1836-
// control. Note: This won't recognize a STW full collection
1837-
// while waiting; we can't wait on multiple monitors.
1838-
LOG_COLLECT_CONCURRENTLY(cause, "whitebox control stall");
1839-
MonitorLocker ml(ConcurrentGCBreakpoints::monitor());
1840-
if (ConcurrentGCBreakpoints::is_controlled()) {
1841-
ml.wait();
1842-
}
1899+
if (should_retry_vm_op(cause, &op)) {
18431900
continue;
18441901
}
18451902
}

src/hotspot/share/gc/g1/g1CollectedHeap.hpp

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -274,6 +274,14 @@ class G1CollectedHeap : public CollectedHeap {
274274
// (e) cause == _g1_periodic_collection and +G1PeriodicGCInvokesConcurrent.
275275
bool should_do_concurrent_full_gc(GCCause::Cause cause);
276276

277+
// Wait until a full mark (either currently in progress or one that completed
278+
// after the current request) has finished. Returns whether that full mark started
279+
// after this request. If so, we typically do not need another one.
280+
bool wait_full_mark_finished(GCCause::Cause cause,
281+
uint old_marking_started_before,
282+
uint old_marking_started_after,
283+
uint old_marking_completed_after);
284+
277285
// Attempt to start a concurrent cycle with the indicated cause.
278286
// precondition: should_do_concurrent_full_gc(cause)
279287
bool try_collect_concurrently(GCCause::Cause cause,

src/hotspot/share/gc/g1/g1CollectorState.hpp

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,9 @@ class G1CollectorState {
6060
// do the concurrent start phase work.
6161
volatile bool _initiate_conc_mark_if_possible;
6262

63+
// Marking is in progress. Set from start of the concurrent start pause to the
64+
// end of the Remark pause.
65+
bool _mark_in_progress;
6366
// Marking or rebuilding remembered set work is in progress. Set from the end
6467
// of the concurrent start pause to the end of the Cleanup pause.
6568
bool _mark_or_rebuild_in_progress;
@@ -78,6 +81,7 @@ class G1CollectorState {
7881
_in_concurrent_start_gc(false),
7982
_initiate_conc_mark_if_possible(false),
8083

84+
_mark_in_progress(false),
8185
_mark_or_rebuild_in_progress(false),
8286
_clearing_bitmap(false),
8387
_in_full_gc(false) { }
@@ -92,6 +96,7 @@ class G1CollectorState {
9296

9397
void set_initiate_conc_mark_if_possible(bool v) { _initiate_conc_mark_if_possible = v; }
9498

99+
void set_mark_in_progress(bool v) { _mark_in_progress = v; }
95100
void set_mark_or_rebuild_in_progress(bool v) { _mark_or_rebuild_in_progress = v; }
96101
void set_clearing_bitmap(bool v) { _clearing_bitmap = v; }
97102

@@ -106,6 +111,7 @@ class G1CollectorState {
106111

107112
bool initiate_conc_mark_if_possible() const { return _initiate_conc_mark_if_possible; }
108113

114+
bool mark_in_progress() const { return _mark_in_progress; }
109115
bool mark_or_rebuild_in_progress() const { return _mark_or_rebuild_in_progress; }
110116
bool clearing_bitmap() const { return _clearing_bitmap; }
111117

src/hotspot/share/gc/g1/g1Policy.cpp

Lines changed: 16 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -593,6 +593,7 @@ void G1Policy::record_full_collection_end() {
593593
collector_state()->set_in_young_gc_before_mixed(false);
594594
collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC"));
595595
collector_state()->set_in_concurrent_start_gc(false);
596+
collector_state()->set_mark_in_progress(false);
596597
collector_state()->set_mark_or_rebuild_in_progress(false);
597598
collector_state()->set_clearing_bitmap(false);
598599

@@ -704,6 +705,7 @@ void G1Policy::record_concurrent_mark_remark_end() {
704705
double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
705706
_analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
706707
record_pause(G1GCPauseType::Remark, _mark_remark_start_sec, end_time_sec);
708+
collector_state()->set_mark_in_progress(false);
707709
}
708710

709711
void G1Policy::record_concurrent_mark_cleanup_start() {
@@ -941,6 +943,7 @@ void G1Policy::record_young_collection_end(bool concurrent_operation_is_full_mar
941943
assert(!(G1GCPauseTypeHelper::is_concurrent_start_pause(this_pause) && collector_state()->mark_or_rebuild_in_progress()),
942944
"If the last pause has been concurrent start, we should not have been in the marking window");
943945
if (G1GCPauseTypeHelper::is_concurrent_start_pause(this_pause)) {
946+
collector_state()->set_mark_in_progress(concurrent_operation_is_full_mark);
944947
collector_state()->set_mark_or_rebuild_in_progress(concurrent_operation_is_full_mark);
945948
}
946949

@@ -1227,6 +1230,17 @@ void G1Policy::initiate_conc_mark() {
12271230
collector_state()->set_initiate_conc_mark_if_possible(false);
12281231
}
12291232

1233+
static const char* requester_for_mixed_abort(GCCause::Cause cause) {
1234+
if (cause == GCCause::_wb_breakpoint) {
1235+
return "run_to breakpoint";
1236+
} else if (GCCause::is_codecache_requested_gc(cause)) {
1237+
return "codecache";
1238+
} else {
1239+
assert(G1CollectedHeap::heap()->is_user_requested_concurrent_full_gc(cause), "must be");
1240+
return "user";
1241+
}
1242+
}
1243+
12301244
void G1Policy::decide_on_concurrent_start_pause() {
12311245
// We are about to decide on whether this pause will be a
12321246
// concurrent start pause.
@@ -1259,8 +1273,7 @@ void G1Policy::decide_on_concurrent_start_pause() {
12591273
initiate_conc_mark();
12601274
log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
12611275
} else if (_g1h->is_user_requested_concurrent_full_gc(cause) ||
1262-
(cause == GCCause::_codecache_GC_threshold) ||
1263-
(cause == GCCause::_codecache_GC_aggressive) ||
1276+
GCCause::is_codecache_requested_gc(cause) ||
12641277
(cause == GCCause::_wb_breakpoint)) {
12651278
// Initiate a concurrent start. A concurrent start must be a young only
12661279
// GC, so the collector state must be updated to reflect this.
@@ -1275,7 +1288,7 @@ void G1Policy::decide_on_concurrent_start_pause() {
12751288
abort_time_to_mixed_tracking();
12761289
initiate_conc_mark();
12771290
log_debug(gc, ergo)("Initiate concurrent cycle (%s requested concurrent cycle)",
1278-
(cause == GCCause::_wb_breakpoint) ? "run_to breakpoint" : "user");
1291+
requester_for_mixed_abort(cause));
12791292
} else {
12801293
// The concurrent marking thread is still finishing up the
12811294
// previous cycle. If we start one right now the two cycles

src/hotspot/share/gc/g1/g1VMOperations.cpp

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -59,6 +59,7 @@ VM_G1TryInitiateConcMark::VM_G1TryInitiateConcMark(uint gc_count_before,
5959
GCCause::Cause gc_cause) :
6060
VM_GC_Operation(gc_count_before, gc_cause),
6161
_transient_failure(false),
62+
_mark_in_progress(false),
6263
_cycle_already_in_progress(false),
6364
_whitebox_attached(false),
6465
_terminating(false),
@@ -83,6 +84,9 @@ void VM_G1TryInitiateConcMark::doit() {
8384
// Record for handling by caller.
8485
_terminating = g1h->concurrent_mark_is_terminating();
8586

87+
_mark_in_progress = g1h->collector_state()->mark_in_progress();
88+
_cycle_already_in_progress = g1h->concurrent_mark()->cm_thread()->in_progress();
89+
8690
if (_terminating && GCCause::is_user_requested_gc(_gc_cause)) {
8791
// When terminating, the request to initiate a concurrent cycle will be
8892
// ignored by do_collection_pause_at_safepoint; instead it will just do
@@ -91,9 +95,8 @@ void VM_G1TryInitiateConcMark::doit() {
9195
// requests the alternative GC might still be needed.
9296
} else if (!g1h->policy()->force_concurrent_start_if_outside_cycle(_gc_cause)) {
9397
// Failure to force the next GC pause to be a concurrent start indicates
94-
// there is already a concurrent marking cycle in progress. Set flag
95-
// to notify the caller and return immediately.
96-
_cycle_already_in_progress = true;
98+
// there is already a concurrent marking cycle in progress. Flags to indicate
99+
// that were already set, so return immediately.
97100
} else if ((_gc_cause != GCCause::_wb_breakpoint) &&
98101
ConcurrentGCBreakpoints::is_controlled()) {
99102
// WhiteBox wants to be in control of concurrent cycles, so don't try to

src/hotspot/share/gc/g1/g1VMOperations.hpp

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,7 @@ class VM_G1CollectFull : public VM_GC_Operation {
4545

4646
class VM_G1TryInitiateConcMark : public VM_GC_Operation {
4747
bool _transient_failure;
48+
bool _mark_in_progress;
4849
bool _cycle_already_in_progress;
4950
bool _whitebox_attached;
5051
bool _terminating;
@@ -59,6 +60,7 @@ class VM_G1TryInitiateConcMark : public VM_GC_Operation {
5960
virtual bool doit_prologue();
6061
virtual void doit();
6162
bool transient_failure() const { return _transient_failure; }
63+
bool mark_in_progress() const { return _mark_in_progress; }
6264
bool cycle_already_in_progress() const { return _cycle_already_in_progress; }
6365
bool whitebox_attached() const { return _whitebox_attached; }
6466
bool terminating() const { return _terminating; }

src/hotspot/share/gc/shared/gcCause.hpp

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -106,6 +106,11 @@ class GCCause : public AllStatic {
106106
cause == GCCause::_heap_dump);
107107
}
108108

109+
inline static bool is_codecache_requested_gc(GCCause::Cause cause) {
110+
return (cause == _codecache_GC_threshold ||
111+
cause == _codecache_GC_aggressive);
112+
}
113+
109114
// Causes for collection of the tenured generation
110115
inline static bool is_tenured_allocation_failure_gc(GCCause::Cause cause) {
111116
// _allocation_failure is the generic cause a collection which could result

0 commit comments

Comments
 (0)