@@ -816,6 +816,12 @@ func gcStart(trigger gcTrigger) {
 		schedEnableUser(false)
 	}
 
+	if work.goroutineLeakFinder.pending.Load() {
+		work.goroutineLeakFinder.enabled = true
+		work.goroutineLeakFinder.pending.Store(false)
+		gcUntrackSyncObjects()
+	}
+
 	// Enter concurrent mark phase and enable
 	// write barriers.
 	//
@@ -832,11 +838,6 @@ func gcStart(trigger gcTrigger) {
 	// possible.
 	setGCPhase(_GCmark)
 
-	if work.goroutineLeakFinder.pending.Load() {
-		work.goroutineLeakFinder.enabled = true
-		work.goroutineLeakFinder.pending.Store(false)
-	}
-
 	gcBgMarkPrepare() // Must happen before assists are enabled.
 	gcPrepareMarkRoots()
 
@@ -1110,8 +1111,6 @@ top:
 func (gp *g) checkIfMaybeRunnable() bool {
 	// Unmask the goroutine address to ensure we are not
 	// dereferencing a masked address.
-	gp = gp.unmask()
-
 	switch gp.waitreason {
 	case waitReasonSelectNoCases,
 		waitReasonChanSendNilChan,
@@ -1125,7 +1124,7 @@ func (gp *g) checkIfMaybeRunnable() bool {
 		// Cycle all through all *sudog to check whether
 		// the goroutine is waiting on a marked channel.
 		for sg := gp.waiting; sg != nil; sg = sg.waitlink {
-			if isMarkedOrNotInHeap(unsafe.Pointer(sg.c)) {
+			if isMarkedOrNotInHeap(unsafe.Pointer(sg.c.get())) {
 				return true
 			}
 		}
@@ -1139,26 +1138,12 @@ func (gp *g) checkIfMaybeRunnable() bool {
 		// check if the synchronization primitive attached to the sudog is marked.
 		if gp.waiting != nil {
 			// Unmask the sema address and check if it's marked.
-			return isMarkedOrNotInHeap(gcUnmask(gp.waiting.elem))
+			return isMarkedOrNotInHeap(gp.waiting.elem.get())
 		}
 	}
 	return true
 }
 
-// unmask returns a *g object with an unmasked address.
-//
-//go:nosplit
-func (gp *g) unmask() *g {
-	return (*g)(gcUnmask(unsafe.Pointer(gp)))
-}
-
-// mask returns a *g object with a masked address.
-//
-//go:nosplit
-func (gp *g) mask() *g {
-	return (*g)(gcMask(unsafe.Pointer(gp)))
-}
-
 // Check to see if more blocked but marked goroutines exist;
 // if so add them into root set and increment work.markrootJobs accordingly
 // return true if we need to run another phase of markroots; return false otherwise
@@ -1171,16 +1156,14 @@ func gcDiscoverMoreStackRoots() {
 
 	// Reorder goroutine list
 	for vIndex < ivIndex {
-		gp := work.stackRoots[vIndex]
-		if gp.checkIfMaybeRunnable() {
-			work.stackRoots[vIndex] = gp
+		if work.stackRoots[vIndex].checkIfMaybeRunnable() {
 			vIndex = vIndex + 1
 			continue
 		}
 		for ivIndex = ivIndex - 1; ivIndex != vIndex; ivIndex = ivIndex - 1 {
-			if swapGp := work.stackRoots[ivIndex]; swapGp.checkIfMaybeRunnable() {
-				work.stackRoots[ivIndex] = gp
-				work.stackRoots[vIndex] = swapGp.unmask()
+			if gp := work.stackRoots[ivIndex]; gp.checkIfMaybeRunnable() {
+				work.stackRoots[ivIndex] = work.stackRoots[vIndex]
+				work.stackRoots[vIndex] = gp
 				vIndex = vIndex + 1
 				break
 			}
@@ -1197,6 +1180,35 @@ func gcDiscoverMoreStackRoots() {
 	}
 }
 
+// gcUntrackSyncObjects scans allgs and sets the elem and c fields of all sudogs to
+// an untrackable pointer. This prevents the GC from marking these objects as live in memory
+// by following these pointers when running deadlock detection.
+func gcUntrackSyncObjects() {
+	assertWorldStopped()
+
+	forEachGRace(func(gp *g) {
+		for sg := gp.waiting; sg != nil; sg = sg.waitlink {
+			sg.elem.untrack()
+			sg.c.untrack()
+		}
+	})
+}
+
+// gcRestoreSyncObjects restores the elem and c fields of all sudogs to their original values.
+// Should be invoked after the goroutine leak detection phase.
+//
+//go:nosplit
+func gcRestoreSyncObjects() {
+	assertWorldStopped()
+
+	forEachGRace(func(gp *g) {
+		for sg := gp.waiting; sg != nil; sg = sg.waitlink {
+			sg.elem.track()
+			sg.c.track()
+		}
+	})
+}
+
 // findGoleaks scans the remaining stackRoots and marks any which are
 // blocked over exclusively unreachable concurrency primitives as leaked (deadlocked).
 // Returns true if the goroutine leak check was performed (or unnecessary).
@@ -1216,7 +1228,7 @@ func findGoleaks() bool {
 	// Make sure these are pushed to the runnable set and ready to be marked.
 	var foundMoreWork bool
 	for i := work.nLiveStackRoots; i < work.nStackRoots; i++ {
-		gp := work.stackRoots[i].unmask()
+		gp := work.stackRoots[i]
 		if readgstatus(gp) == _Gwaiting && !gp.checkIfMaybeRunnable() {
 			// Blocking unrunnable goroutines will be skipped.
 			continue
@@ -1237,7 +1249,7 @@ func findGoleaks() bool {
 
 	// For the remaining goroutines, mark them as unreachable and leaked.
 	for i := work.nLiveStackRoots; i < work.nStackRoots; i++ {
-		gp := work.stackRoots[i].unmask()
+		gp := work.stackRoots[i]
 		casgstatus(gp, _Gwaiting, _Gleaked)
 		fn := findfunc(gp.startpc)
 		if fn.valid() {
@@ -1247,7 +1259,6 @@ func findGoleaks() bool {
 		}
 		traceback(gp.sched.pc, gp.sched.sp, gp.sched.lr, gp)
 		println()
-		work.stackRoots[i] = gp
 	}
 	// Put the remaining roots as ready for marking and drain them.
 	work.markrootJobs += uint32(work.nStackRoots - work.nLiveStackRoots)
@@ -1407,6 +1418,11 @@ func gcMarkTermination(stw worldStop) {
 		throw("non-concurrent sweep failed to drain all sweep queues")
 	}
 
+	if work.goroutineLeakFinder.enabled {
+		// Restore the elem and c fields of all sudogs to their original values.
+		gcRestoreSyncObjects()
+	}
+
 	systemstack(func() {
 		// Pull the GC out of goroutine leak detection mode.
 		work.goroutineLeakFinder.enabled = false