Skip to content

Commit 2292246

Browse files
Thomas Schatzl and Alexandre Jacob
committed
8350621: Code cache stops scheduling GC
Co-authored-by: Thomas Schatzl <[email protected]> Co-authored-by: Alexandre Jacob <[email protected]> Reviewed-by: kbarrett, ayang
1 parent 03e9ea1 commit 2292246

File tree

8 files changed

+348
-52
lines changed

8 files changed

+348
-52
lines changed

src/hotspot/share/gc/g1/g1CollectedHeap.cpp

Lines changed: 103 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -1732,6 +1732,66 @@ static bool gc_counter_less_than(uint x, uint y) {
17321732
#define LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, result) \
17331733
LOG_COLLECT_CONCURRENTLY(cause, "complete %s", BOOL_TO_STR(result))
17341734

1735+
bool G1CollectedHeap::wait_full_mark_finished(GCCause::Cause cause,
1736+
uint old_marking_started_before,
1737+
uint old_marking_started_after,
1738+
uint old_marking_completed_after) {
1739+
// Request is finished if a full collection (concurrent or stw)
1740+
// was started after this request and has completed, e.g.
1741+
// started_before < completed_after.
1742+
if (gc_counter_less_than(old_marking_started_before,
1743+
old_marking_completed_after)) {
1744+
LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, true);
1745+
return true;
1746+
}
1747+
1748+
if (old_marking_started_after != old_marking_completed_after) {
1749+
// If there is an in-progress cycle (possibly started by us), then
1750+
// wait for that cycle to complete, e.g.
1751+
// while completed_now < started_after.
1752+
LOG_COLLECT_CONCURRENTLY(cause, "wait");
1753+
MonitorLocker ml(G1OldGCCount_lock);
1754+
while (gc_counter_less_than(_old_marking_cycles_completed,
1755+
old_marking_started_after)) {
1756+
ml.wait();
1757+
}
1758+
// Request is finished if the collection we just waited for was
1759+
// started after this request.
1760+
if (old_marking_started_before != old_marking_started_after) {
1761+
LOG_COLLECT_CONCURRENTLY(cause, "complete after wait");
1762+
return true;
1763+
}
1764+
}
1765+
return false;
1766+
}
1767+
1768+
// After calling wait_full_mark_finished(), this method determines whether we
1769+
// previously failed for ordinary reasons (concurrent cycle in progress, whitebox
1770+
// has control). Returns whether this was such an ordinary reason.
1771+
static bool should_retry_vm_op(GCCause::Cause cause,
1772+
VM_G1TryInitiateConcMark* op) {
1773+
if (op->cycle_already_in_progress()) {
1774+
// If VMOp failed because a cycle was already in progress, it
1775+
// is now complete. But it didn't finish this user-requested
1776+
// GC, so try again.
1777+
LOG_COLLECT_CONCURRENTLY(cause, "retry after in-progress");
1778+
return true;
1779+
} else if (op->whitebox_attached()) {
1780+
// If WhiteBox wants control, wait for notification of a state
1781+
// change in the controller, then try again. Don't wait for
1782+
// release of control, since collections may complete while in
1783+
// control. Note: This won't recognize a STW full collection
1784+
// while waiting; we can't wait on multiple monitors.
1785+
LOG_COLLECT_CONCURRENTLY(cause, "whitebox control stall");
1786+
MonitorLocker ml(ConcurrentGCBreakpoints::monitor());
1787+
if (ConcurrentGCBreakpoints::is_controlled()) {
1788+
ml.wait();
1789+
}
1790+
return true;
1791+
}
1792+
return false;
1793+
}
1794+
17351795
bool G1CollectedHeap::try_collect_concurrently(GCCause::Cause cause,
17361796
uint gc_counter,
17371797
uint old_marking_started_before) {
@@ -1792,7 +1852,45 @@ bool G1CollectedHeap::try_collect_concurrently(GCCause::Cause cause,
17921852
LOG_COLLECT_CONCURRENTLY(cause, "ignoring STW full GC");
17931853
old_marking_started_before = old_marking_started_after;
17941854
}
1855+
} else if (GCCause::is_codecache_requested_gc(cause)) {
1856+
// For a CodeCache requested GC, before marking, progress is ensured as the
1857+
// following Remark pause unloads code (and signals the requester such).
1858+
// Otherwise we must ensure that it is restarted.
1859+
//
1860+
// For a CodeCache requested GC, a successful GC operation means that
1861+
// (1) marking is in progress. I.e. the VMOp started the marking or a
1862+
// Remark pause is pending from a different VM op; we will potentially
1863+
// abort a mixed phase if needed.
1864+
// (2) a new cycle was started (by this thread or some other), or
1865+
// (3) a Full GC was performed.
1866+
//
1867+
// Cases (2) and (3) are detected together by a change to
1868+
// _old_marking_cycles_started.
1869+
//
1870+
// Compared to other "automatic" GCs (see below), we do not consider being
1871+
// in whitebox as sufficient too because we might be anywhere within that
1872+
// cycle and we need to make progress.
1873+
if (op.mark_in_progress() ||
1874+
(old_marking_started_before != old_marking_started_after)) {
1875+
LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, true);
1876+
return true;
1877+
}
1878+
1879+
if (wait_full_mark_finished(cause,
1880+
old_marking_started_before,
1881+
old_marking_started_after,
1882+
old_marking_completed_after)) {
1883+
return true;
1884+
}
1885+
1886+
if (should_retry_vm_op(cause, &op)) {
1887+
continue;
1888+
}
17951889
} else if (!GCCause::is_user_requested_gc(cause)) {
1890+
assert(cause == GCCause::_g1_humongous_allocation ||
1891+
cause == GCCause::_g1_periodic_collection,
1892+
"Unsupported cause %s", GCCause::to_string(cause));
1893+
17961894
// For an "automatic" (not user-requested) collection, we just need to
17971895
// ensure that progress is made.
17981896
//
@@ -1804,11 +1902,6 @@ bool G1CollectedHeap::try_collect_concurrently(GCCause::Cause cause,
18041902
// (5) a Full GC was performed.
18051903
// Cases (4) and (5) are detected together by a change to
18061904
// _old_marking_cycles_started.
1807-
//
1808-
// Note that (1) does not imply (4). If we're still in the mixed
1809-
// phase of an earlier concurrent collection, the request to make the
1810-
// collection a concurrent start won't be honored. If we don't check for
1811-
// both conditions we'll spin doing back-to-back collections.
18121905
if (op.gc_succeeded() ||
18131906
op.cycle_already_in_progress() ||
18141907
op.whitebox_attached() ||
@@ -1832,56 +1925,20 @@ bool G1CollectedHeap::try_collect_concurrently(GCCause::Cause cause,
18321925
BOOL_TO_STR(op.gc_succeeded()),
18331926
old_marking_started_before, old_marking_started_after);
18341927

1835-
// Request is finished if a full collection (concurrent or stw)
1836-
// was started after this request and has completed, e.g.
1837-
// started_before < completed_after.
1838-
if (gc_counter_less_than(old_marking_started_before,
1839-
old_marking_completed_after)) {
1840-
LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, true);
1928+
if (wait_full_mark_finished(cause,
1929+
old_marking_started_before,
1930+
old_marking_started_after,
1931+
old_marking_completed_after)) {
18411932
return true;
18421933
}
18431934

1844-
if (old_marking_started_after != old_marking_completed_after) {
1845-
// If there is an in-progress cycle (possibly started by us), then
1846-
// wait for that cycle to complete, e.g.
1847-
// while completed_now < started_after.
1848-
LOG_COLLECT_CONCURRENTLY(cause, "wait");
1849-
MonitorLocker ml(G1OldGCCount_lock);
1850-
while (gc_counter_less_than(_old_marking_cycles_completed,
1851-
old_marking_started_after)) {
1852-
ml.wait();
1853-
}
1854-
// Request is finished if the collection we just waited for was
1855-
// started after this request.
1856-
if (old_marking_started_before != old_marking_started_after) {
1857-
LOG_COLLECT_CONCURRENTLY(cause, "complete after wait");
1858-
return true;
1859-
}
1860-
}
1861-
18621935
// If VMOp was successful then it started a new cycle that the above
18631936
// wait &etc should have recognized as finishing this request. This
18641937
// differs from a non-user-request, where gc_succeeded does not imply
18651938
// a new cycle was started.
18661939
assert(!op.gc_succeeded(), "invariant");
18671940

1868-
if (op.cycle_already_in_progress()) {
1869-
// If VMOp failed because a cycle was already in progress, it
1870-
// is now complete. But it didn't finish this user-requested
1871-
// GC, so try again.
1872-
LOG_COLLECT_CONCURRENTLY(cause, "retry after in-progress");
1873-
continue;
1874-
} else if (op.whitebox_attached()) {
1875-
// If WhiteBox wants control, wait for notification of a state
1876-
// change in the controller, then try again. Don't wait for
1877-
// release of control, since collections may complete while in
1878-
// control. Note: This won't recognize a STW full collection
1879-
// while waiting; we can't wait on multiple monitors.
1880-
LOG_COLLECT_CONCURRENTLY(cause, "whitebox control stall");
1881-
MonitorLocker ml(ConcurrentGCBreakpoints::monitor());
1882-
if (ConcurrentGCBreakpoints::is_controlled()) {
1883-
ml.wait();
1884-
}
1941+
if (should_retry_vm_op(cause, &op)) {
18851942
continue;
18861943
}
18871944
}

src/hotspot/share/gc/g1/g1CollectedHeap.hpp

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -274,6 +274,14 @@ class G1CollectedHeap : public CollectedHeap {
274274
// (e) cause == _g1_periodic_collection and +G1PeriodicGCInvokesConcurrent.
275275
bool should_do_concurrent_full_gc(GCCause::Cause cause);
276276

277+
// Wait until a full mark (either currently in progress or one that completed
278+
// after the current request) has finished. Returns whether that full mark started
279+
// after this request. If so, we typically do not need another one.
280+
bool wait_full_mark_finished(GCCause::Cause cause,
281+
uint old_marking_started_before,
282+
uint old_marking_started_after,
283+
uint old_marking_completed_after);
284+
277285
// Attempt to start a concurrent cycle with the indicated cause.
278286
// precondition: should_do_concurrent_full_gc(cause)
279287
bool try_collect_concurrently(GCCause::Cause cause,

src/hotspot/share/gc/g1/g1CollectorState.hpp

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,9 @@ class G1CollectorState {
6060
// do the concurrent start phase work.
6161
volatile bool _initiate_conc_mark_if_possible;
6262

63+
// Marking is in progress. Set from start of the concurrent start pause to the
64+
// end of the Remark pause.
65+
bool _mark_in_progress;
6366
// Marking or rebuilding remembered set work is in progress. Set from the end
6467
// of the concurrent start pause to the end of the Cleanup pause.
6568
bool _mark_or_rebuild_in_progress;
@@ -78,6 +81,7 @@ class G1CollectorState {
7881
_in_concurrent_start_gc(false),
7982
_initiate_conc_mark_if_possible(false),
8083

84+
_mark_in_progress(false),
8185
_mark_or_rebuild_in_progress(false),
8286
_clear_bitmap_in_progress(false),
8387
_in_full_gc(false) { }
@@ -92,6 +96,7 @@ class G1CollectorState {
9296

9397
void set_initiate_conc_mark_if_possible(bool v) { _initiate_conc_mark_if_possible = v; }
9498

99+
void set_mark_in_progress(bool v) { _mark_in_progress = v; }
95100
void set_mark_or_rebuild_in_progress(bool v) { _mark_or_rebuild_in_progress = v; }
96101
void set_clear_bitmap_in_progress(bool v) { _clear_bitmap_in_progress = v; }
97102

@@ -106,6 +111,7 @@ class G1CollectorState {
106111

107112
bool initiate_conc_mark_if_possible() const { return _initiate_conc_mark_if_possible; }
108113

114+
bool mark_in_progress() const { return _mark_in_progress; }
109115
bool mark_or_rebuild_in_progress() const { return _mark_or_rebuild_in_progress; }
110116
bool clear_bitmap_in_progress() const { return _clear_bitmap_in_progress; }
111117

src/hotspot/share/gc/g1/g1Policy.cpp

Lines changed: 16 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -591,6 +591,7 @@ void G1Policy::record_full_collection_end() {
591591
collector_state()->set_in_young_gc_before_mixed(false);
592592
collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC"));
593593
collector_state()->set_in_concurrent_start_gc(false);
594+
collector_state()->set_mark_in_progress(false);
594595
collector_state()->set_mark_or_rebuild_in_progress(false);
595596
collector_state()->set_clear_bitmap_in_progress(false);
596597

@@ -703,6 +704,7 @@ void G1Policy::record_concurrent_mark_remark_end() {
703704
double elapsed_time_ms = (end_time_sec - start_time_sec) * 1000.0;
704705
_analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
705706
record_pause(G1GCPauseType::Remark, start_time_sec, end_time_sec);
707+
collector_state()->set_mark_in_progress(false);
706708
}
707709

708710
G1CollectionSetCandidates* G1Policy::candidates() const {
@@ -936,6 +938,7 @@ void G1Policy::record_young_collection_end(bool concurrent_operation_is_full_mar
936938
assert(!(G1GCPauseTypeHelper::is_concurrent_start_pause(this_pause) && collector_state()->mark_or_rebuild_in_progress()),
937939
"If the last pause has been concurrent start, we should not have been in the marking window");
938940
if (G1GCPauseTypeHelper::is_concurrent_start_pause(this_pause)) {
941+
collector_state()->set_mark_in_progress(concurrent_operation_is_full_mark);
939942
collector_state()->set_mark_or_rebuild_in_progress(concurrent_operation_is_full_mark);
940943
}
941944

@@ -1222,6 +1225,17 @@ void G1Policy::initiate_conc_mark() {
12221225
collector_state()->set_initiate_conc_mark_if_possible(false);
12231226
}
12241227

1228+
static const char* requester_for_mixed_abort(GCCause::Cause cause) {
1229+
if (cause == GCCause::_wb_breakpoint) {
1230+
return "run_to breakpoint";
1231+
} else if (GCCause::is_codecache_requested_gc(cause)) {
1232+
return "codecache";
1233+
} else {
1234+
assert(G1CollectedHeap::heap()->is_user_requested_concurrent_full_gc(cause), "must be");
1235+
return "user";
1236+
}
1237+
}
1238+
12251239
void G1Policy::decide_on_concurrent_start_pause() {
12261240
// We are about to decide on whether this pause will be a
12271241
// concurrent start pause.
@@ -1254,8 +1268,7 @@ void G1Policy::decide_on_concurrent_start_pause() {
12541268
initiate_conc_mark();
12551269
log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
12561270
} else if (_g1h->is_user_requested_concurrent_full_gc(cause) ||
1257-
(cause == GCCause::_codecache_GC_threshold) ||
1258-
(cause == GCCause::_codecache_GC_aggressive) ||
1271+
GCCause::is_codecache_requested_gc(cause) ||
12591272
(cause == GCCause::_wb_breakpoint)) {
12601273
// Initiate a concurrent start. A concurrent start must be a young only
12611274
// GC, so the collector state must be updated to reflect this.
@@ -1270,7 +1283,7 @@ void G1Policy::decide_on_concurrent_start_pause() {
12701283
abort_time_to_mixed_tracking();
12711284
initiate_conc_mark();
12721285
log_debug(gc, ergo)("Initiate concurrent cycle (%s requested concurrent cycle)",
1273-
(cause == GCCause::_wb_breakpoint) ? "run_to breakpoint" : "user");
1286+
requester_for_mixed_abort(cause));
12741287
} else {
12751288
// The concurrent marking thread is still finishing up the
12761289
// previous cycle. If we start one right now the two cycles

src/hotspot/share/gc/g1/g1VMOperations.cpp

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -59,6 +59,7 @@ VM_G1TryInitiateConcMark::VM_G1TryInitiateConcMark(uint gc_count_before,
5959
GCCause::Cause gc_cause) :
6060
VM_GC_Collect_Operation(gc_count_before, gc_cause),
6161
_transient_failure(false),
62+
_mark_in_progress(false),
6263
_cycle_already_in_progress(false),
6364
_whitebox_attached(false),
6465
_terminating(false),
@@ -83,6 +84,9 @@ void VM_G1TryInitiateConcMark::doit() {
8384
// Record for handling by caller.
8485
_terminating = g1h->concurrent_mark_is_terminating();
8586

87+
_mark_in_progress = g1h->collector_state()->mark_in_progress();
88+
_cycle_already_in_progress = g1h->concurrent_mark()->cm_thread()->in_progress();
89+
8690
if (_terminating && GCCause::is_user_requested_gc(_gc_cause)) {
8791
// When terminating, the request to initiate a concurrent cycle will be
8892
// ignored by do_collection_pause_at_safepoint; instead it will just do
@@ -91,9 +95,8 @@ void VM_G1TryInitiateConcMark::doit() {
9195
// requests the alternative GC might still be needed.
9296
} else if (!g1h->policy()->force_concurrent_start_if_outside_cycle(_gc_cause)) {
9397
// Failure to force the next GC pause to be a concurrent start indicates
94-
// there is already a concurrent marking cycle in progress. Set flag
95-
// to notify the caller and return immediately.
96-
_cycle_already_in_progress = true;
98+
// there is already a concurrent marking cycle in progress. Flags to indicate
99+
// that were already set, so return immediately.
97100
} else if ((_gc_cause != GCCause::_wb_breakpoint) &&
98101
ConcurrentGCBreakpoints::is_controlled()) {
99102
// WhiteBox wants to be in control of concurrent cycles, so don't try to

src/hotspot/share/gc/g1/g1VMOperations.hpp

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,7 @@ class VM_G1CollectFull : public VM_GC_Collect_Operation {
4545

4646
class VM_G1TryInitiateConcMark : public VM_GC_Collect_Operation {
4747
bool _transient_failure;
48+
bool _mark_in_progress;
4849
bool _cycle_already_in_progress;
4950
bool _whitebox_attached;
5051
bool _terminating;
@@ -59,6 +60,7 @@ class VM_G1TryInitiateConcMark : public VM_GC_Collect_Operation {
5960
virtual bool doit_prologue();
6061
virtual void doit();
6162
bool transient_failure() const { return _transient_failure; }
63+
bool mark_in_progress() const { return _mark_in_progress; }
6264
bool cycle_already_in_progress() const { return _cycle_already_in_progress; }
6365
bool whitebox_attached() const { return _whitebox_attached; }
6466
bool terminating() const { return _terminating; }

src/hotspot/share/gc/shared/gcCause.hpp

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -104,6 +104,11 @@ class GCCause : public AllStatic {
104104
cause == GCCause::_heap_dump);
105105
}
106106

107+
inline static bool is_codecache_requested_gc(GCCause::Cause cause) {
108+
return (cause == _codecache_GC_threshold ||
109+
cause == _codecache_GC_aggressive);
110+
}
111+
107112
// Causes for collection of the tenured generation
108113
inline static bool is_tenured_allocation_failure_gc(GCCause::Cause cause) {
109114
// _allocation_failure is the generic cause a collection which could result

0 commit comments

Comments (0)