Commit 710b419

Renamed nMaybeLiveStackRoots to nMaybeRunnableStackRoots. Refactoring.

1 parent fb905bb commit 710b419

2 files changed: +54 -78 lines changed

src/runtime/mgc.go

Lines changed: 51 additions & 75 deletions
@@ -373,11 +373,11 @@ type workType struct {
 
 	// Number of roots of various root types. Set by gcPrepareMarkRoots.
 	//
-	// During normal GC cycle, nStackRoots == nLiveStackRoots == len(stackRoots);
-	// during goroutine leak detection, nLiveStackRoots is the number of stackRoots
-	// to examine, and nStackRoots == len(stackRoots), which include goroutines that are
-	// unmarked / not runnable
-	nDataRoots, nBSSRoots, nSpanRoots, nStackRoots, nLiveStackRoots int
+	// During normal GC cycle, nStackRoots == nMaybeRunnableStackRoots == len(stackRoots);
+	// during goroutine leak detection, nMaybeRunnableStackRoots is the number of stackRoots
+	// scheduled for marking.
+	// In both variants, nStackRoots == len(stackRoots).
+	nDataRoots, nBSSRoots, nSpanRoots, nStackRoots, nMaybeRunnableStackRoots int
 
 	// The following fields monitor the GC phase of the current cycle during
 	// goroutine leak detection.
@@ -390,7 +390,7 @@ type workType struct {
390390
enabled bool
391391
// The GC has performed goroutine leak detection during the current GC cycle; it is set
392392
// during gcMarkDone(), right after goroutine leak detection has concluded, and unset during
393-
// gcStart(). Is protected by STW.
393+
// gcMarkTermination(). Is protected by STW.
394394
done bool
395395
}
396396

@@ -399,11 +399,10 @@ type workType struct {
 
 	// stackRoots is a snapshot of all of the Gs that existed before the
 	// beginning of concurrent marking. During goroutine leak detection, stackRoots
-	// is partitioned into two sets; to the left of nLiveStackRoots are stackRoots
-	// of running / runnable goroutines and to the right of nLiveStackRoots are
+	// is partitioned into two sets; to the left of nMaybeRunnableStackRoots are stackRoots
+	// of running / runnable goroutines and to the right of nMaybeRunnableStackRoots are
 	// stackRoots of unmarked / not runnable goroutines
-	// gcDiscoverMoreStackRoots modifies the stackRoots array to redo the partition
-	// after each marking phase iteration.
+	// The stackRoots array is re-partitioned after each marking phase iteration.
 	stackRoots []*g
 
 	// Each type of GC state transition is protected by a lock.
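Aside (not part of the commit): the partition described in the comment above can be pictured with a small standalone Go sketch. The rootSet type, the int stand-ins for *g, and checkInvariant are hypothetical names invented for illustration; they only demonstrate the invariant that nStackRoots always equals len(stackRoots) while nMaybeRunnableStackRoots marks the boundary between the maybe-runnable prefix and the blocked suffix.

package main

import "fmt"

// rootSet is a toy stand-in for the relevant workType fields; ints replace *g.
type rootSet struct {
	stackRoots               []int
	nStackRoots              int
	nMaybeRunnableStackRoots int
}

// checkInvariant mirrors the comment: nStackRoots == len(stackRoots) in both
// GC variants, and the maybe-runnable boundary never exceeds it.
func (rs rootSet) checkInvariant() error {
	if rs.nStackRoots != len(rs.stackRoots) {
		return fmt.Errorf("nStackRoots %d != len(stackRoots) %d", rs.nStackRoots, len(rs.stackRoots))
	}
	if rs.nMaybeRunnableStackRoots < 0 || rs.nMaybeRunnableStackRoots > rs.nStackRoots {
		return fmt.Errorf("boundary %d out of range [0,%d]", rs.nMaybeRunnableStackRoots, rs.nStackRoots)
	}
	return nil
}

func main() {
	rs := rootSet{stackRoots: []int{10, 11, 12, 13}, nStackRoots: 4, nMaybeRunnableStackRoots: 2}
	fmt.Println("maybe-runnable prefix:", rs.stackRoots[:rs.nMaybeRunnableStackRoots])
	fmt.Println("blocked suffix:", rs.stackRoots[rs.nMaybeRunnableStackRoots:])
	fmt.Println("invariant violation:", rs.checkInvariant())
}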
@@ -735,8 +734,8 @@ func gcStart(trigger gcTrigger) {
 		mode = gcForceBlockMode
 	} else if work.goroutineLeakFinder.pending.Load() || debug.gcgoroutineleaks > 0 {
 		// If goroutine leak detection has been enabled (via GODEBUG=gcgoroutineleaks=1),
-		// or via profiling, fully stop the world.
-		mode = gcForceBlockMode
+		// or via profiling, stop the world during the marking phase.
+		mode = gcForceMode
 	}
 
 	// Ok, we're doing it! Stop everybody else
@@ -799,7 +798,6 @@ func gcStart(trigger gcTrigger) {
 	clearpools()
 
 	work.cycles.Add(1)
-	work.goroutineLeakFinder.done = false
 
 	// Assists and workers can start the moment we start
 	// the world.
@@ -939,7 +937,7 @@ func gcMarkDone() {
 	// time.
 	semacquire(&work.markDoneSema)
 	if work.goroutineLeakFinder.enabled {
-		gcDiscoverMoreStackRoots()
+		findMaybeRunnableGoroutines()
 	}
 
top:
@@ -1033,32 +1031,27 @@ top:
 			}
 		}
 	})
-	switch {
-	case restart:
-		gcDebugMarkDone.restartedDueTo27993 = true
 
-		getg().m.preemptoff = ""
-		systemstack(func() {
-			// Accumulate the time we were stopped before we had to start again.
-			work.cpuStats.accumulateGCPauseTime(nanotime()-stw.finishedStopping, work.maxprocs)
-
-			// Start the world again.
-			now := startTheWorldWithSema(0, stw)
-			work.pauseNS += now - stw.startedStopping
-		})
-		semrelease(&worldsema)
-		goto top
-	case work.goroutineLeakFinder.enabled && !work.goroutineLeakFinder.done:
-		// Detect goroutine leaks. If the returned value is true, then detection was
-		// performed during this cycle. Otherwise, more runnable goroutines were discovered,
-		// requiring additional mark work.
-		work.goroutineLeakFinder.done = findGoleaks()
+	// Check whether we need to resume the marking phase because of issue #27993
+	// or because of goroutine leak detection.
+	if restart || (work.goroutineLeakFinder.enabled && !work.goroutineLeakFinder.done) {
+		if restart {
+			// Restart because of issue #27993.
+			gcDebugMarkDone.restartedDueTo27993 = true
+		} else {
+			// Marking has reached a fixed-point. Attempt to detect goroutine leaks.
+			//
+			// If the returned value is true, then detection was performed during this cycle.
+			// Otherwise, more runnable goroutines were discovered, requiring additional mark work.
+			work.goroutineLeakFinder.done = findGoleaks()
+		}
 
 		getg().m.preemptoff = ""
 		systemstack(func() {
 			// Accumulate the time we were stopped before we had to start again.
 			work.cpuStats.accumulateGCPauseTime(nanotime()-stw.finishedStopping, work.maxprocs)
 
+			// Start the world again.
 			now := startTheWorldWithSema(0, stw)
 			work.pauseNS += now - stw.startedStopping
 		})
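Aside (not part of the commit): the restructured branch above is easier to see as a driver loop: marking keeps resuming as long as either the issue #27993 condition fires or leak detection reports that more maybe-runnable goroutines were uncovered. The standalone sketch below models only that control flow; detectLeaksOnce, the round counter, and the local flags are invented for illustration and are not runtime APIs.

package main

import "fmt"

// detectLeaksOnce stands in for findGoleaks: it pretends the first attempt
// uncovers more maybe-runnable goroutines and the second attempt completes.
func detectLeaksOnce(round int) (done bool) { return round >= 2 }

func main() {
	leakDetectionEnabled := true
	leakDetectionDone := false

	for round := 1; ; round++ {
		fmt.Println("mark work drained, round", round)

		restart := false // stands in for the issue #27993 re-check
		if restart || (leakDetectionEnabled && !leakDetectionDone) {
			if restart {
				// Resume marking because more work surfaced late.
			} else {
				// Attempt leak detection; if it finds more work, marking resumes.
				leakDetectionDone = detectLeaksOnce(round)
			}
			continue // model of "start the world again and goto top"
		}
		break // neither condition holds: proceed to mark termination
	}
	fmt.Println("mark termination")
}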
@@ -1142,16 +1135,17 @@ func (gp *g) checkIfMaybeRunnable() bool {
 	return true
 }
 
-// Check to see if more blocked but marked goroutines exist;
-// if so add them into root set and increment work.markrootJobs accordingly
-// return true if we need to run another phase of markroots; return false otherwise
-func gcDiscoverMoreStackRoots() {
-	// to begin with we have a set of unchecked stackRoots between
-	// vIndex and ivIndex. During the loop, anything < vIndex should be
-	// valid stackRoots and anything >= ivIndex should be invalid stackRoots
-	// and the loop terminates when the two indices meet
-	var vIndex, ivIndex int = work.nLiveStackRoots, work.nStackRoots
-
+// findMaybeRunnableGoroutines checks to see if more blocked but maybe-runnable goroutines exist.
+// If so, it adds them into root set and increments work.markrootJobs accordingly.
+// Returns true if we need to run another phase of markroots; returns false otherwise.
+func findMaybeRunnableGoroutines() (moreWork bool) {
+	oldRootJobs := work.markrootJobs.Load()
+
+	// To begin with we have a set of unchecked stackRoots between
+	// vIndex and ivIndex. During the loop, anything < vIndex should be
+	// valid stackRoots and anything >= ivIndex should be invalid stackRoots.
+	// The loop terminates when the two indices meet.
+	var vIndex, ivIndex int = work.nMaybeRunnableStackRoots, work.nStackRoots
 	// Reorder goroutine list
 	for vIndex < ivIndex {
 		if work.stackRoots[vIndex].checkIfMaybeRunnable() {
@@ -1168,12 +1162,12 @@ func gcDiscoverMoreStackRoots() {
 		}
 	}
 
-	var newRootJobs int32 = int32(work.baseStacks) + int32(vIndex)
-	if newRootJobs > int32(work.markrootJobs.Load()) {
-		// reset markrootNext as it could have been incremented past markrootJobs
-		work.nLiveStackRoots = vIndex
-		work.markrootJobs.Store(uint32(newRootJobs))
+	newRootJobs := work.baseStacks + uint32(vIndex)
+	if newRootJobs > oldRootJobs {
+		work.nMaybeRunnableStackRoots = vIndex
+		work.markrootJobs.Store(newRootJobs)
 	}
+	return newRootJobs > oldRootJobs
 }
 
 // getSyncObjectsUnreachable scans allgs and sets the elem and c fields of all sudogs to
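Aside (not part of the commit): the body of the reorder loop is elided by the diff, but the comments pin down its contract: everything left of vIndex is maybe-runnable, everything at or right of ivIndex is not, and the two indices walk toward each other. The standalone Go sketch below is one plausible implementation of that contract over a plain slice; fakeG, its maybeRunnable flag, and partitionMaybeRunnable are invented names, not runtime code.

package main

import "fmt"

// fakeG is a toy stand-in for *g; maybeRunnable plays the role of checkIfMaybeRunnable.
type fakeG struct {
	id            int
	maybeRunnable bool
}

// partitionMaybeRunnable reorders gs[lo:hi] so that maybe-runnable entries end up
// in gs[lo:v] and blocked entries in gs[v:hi], returning the new boundary v.
// Entries below lo are assumed to be already queued for marking.
func partitionMaybeRunnable(gs []*fakeG, lo, hi int) int {
	v, iv := lo, hi
	for v < iv {
		if gs[v].maybeRunnable {
			v++ // already on the maybe-runnable side
			continue
		}
		// Move the blocked goroutine to the tail and shrink the unchecked region.
		iv--
		gs[v], gs[iv] = gs[iv], gs[v]
	}
	return v
}

func main() {
	gs := []*fakeG{{1, true}, {2, false}, {3, true}, {4, false}, {5, true}}
	lo := 1 // pretend gs[0] was already queued in a previous pass
	v := partitionMaybeRunnable(gs, lo, len(gs))
	fmt.Println("newly discovered maybe-runnable roots:", v-lo) // extra markroot jobs
	for _, g := range gs {
		fmt.Println(g.id, g.maybeRunnable)
	}
}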
@@ -1208,43 +1202,24 @@ func gcRestoreSyncObjects() {
 // findGoleaks scans the remaining stackRoots and marks any which are
 // blocked over exclusively unreachable concurrency primitives as leaked (deadlocked).
 // Returns true if the goroutine leak check was performed (or unnecessary).
-// Returns false if the GC cycle has not yet computed all (maybe-)live goroutines.
+// Returns false if the GC cycle has not yet computed all maybe-runnable goroutines.
 func findGoleaks() bool {
 	// Report goroutine leaks and mark them unreachable, and resume marking
 	// we still need to mark these unreachable *g structs as they
 	// get reused, but their stack won't get scanned
-	if work.nLiveStackRoots == work.nStackRoots {
-		// nStackRoots == nLiveStackRoots means that all goroutines are marked.
+	if work.nMaybeRunnableStackRoots == work.nStackRoots {
+		// nMaybeRunnableStackRoots == nStackRoots means that all goroutines are marked.
 		return true
 	}
 
-	// Try to reach another fix point here. Keep scouting for runnable goroutines until
-	// none are left.
-	// Valid goroutines may be found after all GC work is drained.
-	// Make sure these are pushed to the runnable set and ready to be marked.
-	var foundMoreWork bool
-	for i := work.nLiveStackRoots; i < work.nStackRoots; i++ {
-		gp := work.stackRoots[i]
-		if readgstatus(gp) == _Gwaiting && !gp.checkIfMaybeRunnable() {
-			// Blocking unrunnable goroutines will be skipped.
-			continue
-		}
-		work.stackRoots[i] = work.stackRoots[work.nLiveStackRoots]
-		work.stackRoots[work.nLiveStackRoots] = gp
-		work.nLiveStackRoots += 1
-		// We now have one more markroot job.
-		work.markrootJobs.Add(1)
-		// We might still have some work to do.
-		// Make sure in the next iteration we will check re-check for new runnable goroutines.
-		foundMoreWork = true
-	}
-	if foundMoreWork {
+	// Check whether any more maybe-runnable goroutines can be found by the GC.
+	if findMaybeRunnableGoroutines() {
 		// We found more work, so we need to resume the marking phase.
 		return false
 	}
 
 	// For the remaining goroutines, mark them as unreachable and leaked.
-	for i := work.nLiveStackRoots; i < work.nStackRoots; i++ {
+	for i := work.nMaybeRunnableStackRoots; i < work.nStackRoots; i++ {
 		gp := work.stackRoots[i]
 		casgstatus(gp, _Gwaiting, _Gleaked)
 		fn := findfunc(gp.startpc)
@@ -1259,8 +1234,8 @@ func findGoleaks() bool {
 		println()
 	}
 	// Put the remaining roots as ready for marking and drain them.
-	work.markrootJobs.Add(int32(work.nStackRoots - work.nLiveStackRoots))
-	work.nLiveStackRoots = work.nStackRoots
+	work.markrootJobs.Add(int32(work.nStackRoots - work.nMaybeRunnableStackRoots))
+	work.nMaybeRunnableStackRoots = work.nStackRoots
 	return true
 }
 
@@ -1424,6 +1399,7 @@ func gcMarkTermination(stw worldStop) {
 	systemstack(func() {
 		// Pull the GC out of goroutine leak detection mode.
 		work.goroutineLeakFinder.enabled = false
+		work.goroutineLeakFinder.done = false
 
 		// The memstats updated above must be updated with the world
 		// stopped to ensure consistency of some values, such as
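Aside (not part of the commit): findGoleaks targets goroutines blocked on concurrency primitives that no live goroutine can reach any more. The toy program below produces exactly that situation; with a runtime built from this patch and GODEBUG=gcgoroutineleaks=1 such senders are the candidates for being reported as leaked, whereas a stock runtime simply keeps them parked forever.

package main

import (
	"fmt"
	"runtime"
	"time"
)

// leak starts a sender on an unbuffered channel and then abandons the channel,
// so the child goroutine blocks on a send that can never complete.
func leak() {
	ch := make(chan int)
	go func() {
		ch <- 42 // blocks forever: ch is now reachable only from this goroutine
	}()
	// Return without ever receiving from ch.
}

func main() {
	before := runtime.NumGoroutine()
	for i := 0; i < 10; i++ {
		leak()
	}
	runtime.GC()
	time.Sleep(100 * time.Millisecond)
	fmt.Printf("goroutines before: %d, after: %d\n", before, runtime.NumGoroutine())
}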

src/runtime/mgcmark.go

Lines changed: 3 additions & 3 deletions
@@ -155,17 +155,17 @@ func gcPrepareMarkRoots() {
 	if work.goroutineLeakFinder.enabled {
 		// goroutine leak finder GC --- only prepare runnable
 		// goroutines for marking.
-		work.stackRoots, work.nLiveStackRoots = allGsSnapshotSortedForGC()
+		work.stackRoots, work.nMaybeRunnableStackRoots = allGsSnapshotSortedForGC()
 	} else {
 		// regular GC --- scan every goroutine
 		work.stackRoots = allGsSnapshot()
-		work.nLiveStackRoots = len(work.stackRoots)
+		work.nMaybeRunnableStackRoots = len(work.stackRoots)
 	}
 
 	work.nStackRoots = len(work.stackRoots)
 
 	work.markrootNext.Store(0)
-	work.markrootJobs.Store(uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nLiveStackRoots))
+	work.markrootJobs.Store(uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nMaybeRunnableStackRoots))
 
 	// Calculate base indexes of each root type
 	work.baseData = uint32(fixedRootCount)
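Aside (not part of the commit): the Store above seeds the initial markroot job count; the leak-detection variant simply queues fewer stack-root jobs up front. A toy calculation with made-up counts shows how the job count and the baseStacks offset relate; only the structure mirrors the runtime, the numbers are arbitrary.

package main

import "fmt"

func main() {
	// Assumed value mirroring the stock runtime's fixed roots
	// (finalizers and freeing dead G stacks); the other counts are arbitrary.
	const fixedRootCount = 2
	nDataRoots, nBSSRoots, nSpanRoots := 3, 1, 8
	nStackRoots := 40              // len(stackRoots): every snapshotted goroutine
	nMaybeRunnableStackRoots := 25 // only these become markroot jobs up front

	markrootJobs := fixedRootCount + nDataRoots + nBSSRoots + nSpanRoots + nMaybeRunnableStackRoots
	baseStacks := fixedRootCount + nDataRoots + nBSSRoots + nSpanRoots

	fmt.Println("initial markroot jobs:", markrootJobs)
	fmt.Println("job i >= baseStacks scans stackRoots[i-baseStacks]; baseStacks =", baseStacks)
	fmt.Println("stack roots deferred until proven maybe-runnable:", nStackRoots-nMaybeRunnableStackRoots)
}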
