Commit 11d5484
runtime: fix self-deadlock on sbrk platforms
The sbrk mem.go implementation doesn't enforce being called on the
system stack, but it can call back into itself if there's a stack
growth. Because the sbrk implementation requires acquiring memlock,
it can self-deadlock. For the most part the mem.go API is called on
the system stack, but there are cases where we call sysAlloc on the
regular Go stack. This is fine in general, except on sbrk platforms,
because of the aforementioned deadlock.

This change, rather than adding a new invariant to mem.go, switches
to the system stack in the mem.go API implementation for sbrk
platforms.

Change-Id: Ie0f0ea80a8d7578cdeabc8252107e64a5e633856
Reviewed-on: https://go-review.googlesource.com/c/go/+/709775
Reviewed-by: Cherry Mui <[email protected]>
LUCI-TryBot-Result: Go LUCI <[email protected]>
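The failure shape described above is easy to reproduce in miniature. The following is a small, self-contained Go illustration with hypothetical names (memlock here is an ordinary sync.Mutex standing in for the runtime's memlock; alloc stands in for the sbrk allocator), not runtime code. In the real runtime the re-entrant call is implicit: growing a goroutine stack allocates memory, which on sbrk platforms leads back into the allocator while memlock is still held.

package main

import "sync"

// memlock stands in for the runtime's memlock (illustrative only).
var memlock sync.Mutex

// alloc locks memlock, then runs work that may call back into alloc,
// just as a stack growth mid-allocation does in the real runtime.
func alloc(n int, midAlloc func()) []byte {
	memlock.Lock()
	defer memlock.Unlock()
	if midAlloc != nil {
		midAlloc() // the "stack growth" happening while memlock is held
	}
	return make([]byte, n)
}

func main() {
	// Re-entering alloc while memlock is held blocks on the lock forever;
	// Go's runtime reports "all goroutines are asleep - deadlock!".
	alloc(64, func() { alloc(32, nil) })
}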
1 parent 2e52060 commit 11d5484

File tree

1 file changed: +103 −55
src/runtime/mem_sbrk.go

Lines changed: 103 additions & 55 deletions
@@ -48,13 +48,32 @@ type memHdrPtr uintptr
 func (p memHdrPtr) ptr() *memHdr { return (*memHdr)(unsafe.Pointer(p)) }
 func (p *memHdrPtr) set(x *memHdr) { *p = memHdrPtr(unsafe.Pointer(x)) }
 
+// memAlloc allocates n bytes from the brk reservation, or if it's full,
+// the system.
+//
+// memlock must be held.
+//
+// memAlloc must be called on the system stack, otherwise a stack growth
+// could cause us to call back into it. Since memlock is held, that could
+// lead to a self-deadlock.
+//
+//go:systemstack
 func memAlloc(n uintptr) unsafe.Pointer {
 	if p := memAllocNoGrow(n); p != nil {
 		return p
 	}
 	return sbrk(n)
 }
 
+// memAllocNoGrow attempts to allocate n bytes from the existing brk.
+//
+// memlock must be held.
+//
+// memAllocNoGrow must be called on the system stack, otherwise a stack
+// growth could cause us to call back into it. Since memlock is held,
+// that could lead to a self-deadlock.
+//
+//go:systemstack
 func memAllocNoGrow(n uintptr) unsafe.Pointer {
 	n = memRound(n)
 	var prevp *memHdr
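A note on the directive these new comments accompany: //go:systemstack marks a function that must run on the system stack, and the requirement is enforced dynamically by a function prologue check, so a violation aborts the process rather than deadlocking silently. A minimal sketch of the calling convention (runtime-internal; this only has effect inside package runtime and is not runnable as-is):

//go:systemstack
func mustRunOnSystemStack() {
	// The system stack is fixed-size and never grows, so nothing here
	// can re-enter the allocator via a stack growth.
}

func caller() {
	systemstack(mustRunOnSystemStack) // OK: switches to the g0 stack
	// Calling mustRunOnSystemStack() directly from a growable goroutine
	// stack would fail the prologue check and abort.
}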
@@ -78,6 +97,15 @@ func memAllocNoGrow(n uintptr) unsafe.Pointer {
 	return nil
 }
 
+// memFree makes [ap, ap+n) available for reallocation by memAlloc.
+//
+// memlock must be held.
+//
+// memFree must be called on the system stack, otherwise a stack growth
+// could cause us to call back into it. Since memlock is held, that could
+// lead to a self-deadlock.
+//
+//go:systemstack
 func memFree(ap unsafe.Pointer, n uintptr) {
 	n = memRound(n)
 	memclrNoHeapPointers(ap, n)
@@ -122,6 +150,15 @@ func memFree(ap unsafe.Pointer, n uintptr) {
 	}
 }
 
+// memCheck checks invariants around free list management.
+//
+// memlock must be held.
+//
+// memCheck must be called on the system stack, otherwise a stack growth
+// could cause us to call back into it. Since memlock is held, that could
+// lead to a self-deadlock.
+//
+//go:systemstack
 func memCheck() {
 	if !memDebug {
 		return
@@ -158,26 +195,31 @@ func initBloc() {
 }
 
 func sysAllocOS(n uintptr, _ string) unsafe.Pointer {
-	lock(&memlock)
-	p := memAlloc(n)
-	memCheck()
-	unlock(&memlock)
-	return p
+	var p uintptr
+	systemstack(func() {
+		lock(&memlock)
+		p = uintptr(memAlloc(n))
+		memCheck()
+		unlock(&memlock)
+	})
+	return unsafe.Pointer(p)
 }
 
 func sysFreeOS(v unsafe.Pointer, n uintptr) {
-	lock(&memlock)
-	if uintptr(v)+n == bloc {
-		// Address range being freed is at the end of memory,
-		// so record a new lower value for end of memory.
-		// Can't actually shrink address space because segment is shared.
-		memclrNoHeapPointers(v, n)
-		bloc -= n
-	} else {
-		memFree(v, n)
-		memCheck()
-	}
-	unlock(&memlock)
+	systemstack(func() {
+		lock(&memlock)
+		if uintptr(v)+n == bloc {
+			// Address range being freed is at the end of memory,
+			// so record a new lower value for end of memory.
+			// Can't actually shrink address space because segment is shared.
+			memclrNoHeapPointers(v, n)
+			bloc -= n
+		} else {
+			memFree(v, n)
+			memCheck()
+		}
+		unlock(&memlock)
+	})
 }
 
 func sysUnusedOS(v unsafe.Pointer, n uintptr) {
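Apart from the systemstack wrapper, sysFreeOS's logic is unchanged: a range freed at the very end of memory just lowers bloc, the logical break, since the shared segment's address space can't actually be returned; everything else goes through memFree onto the free list. A toy, self-contained model of that bookkeeping (hypothetical values; the free list is elided):

package main

import "fmt"

// bloc models the runtime's logical end-of-memory marker.
var bloc uintptr = 0x10000

// free mirrors sysFreeOS's branch: trim the break when the freed range
// sits exactly at the end, free-list it otherwise (not modeled here).
func free(v, n uintptr) {
	if v+n == bloc {
		bloc -= n // range is at the end: record a lower end of memory
		return
	}
	// otherwise: insert [v, v+n) into the free list (elided)
}

func main() {
	free(0xF000, 0x1000) // topmost range: bloc drops to 0xF000
	free(0x4000, 0x1000) // interior range: bloc unchanged
	fmt.Printf("bloc = %#x\n", bloc)
}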
@@ -202,49 +244,55 @@ func sysFaultOS(v unsafe.Pointer, n uintptr) {
 }
 
 func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer {
-	lock(&memlock)
-	var p unsafe.Pointer
-	if uintptr(v) == bloc {
-		// Address hint is the current end of memory,
-		// so try to extend the address space.
-		p = sbrk(n)
-	}
-	if p == nil && v == nil {
-		p = memAlloc(n)
-		memCheck()
-	}
-	unlock(&memlock)
-	return p
+	var p uintptr
+	systemstack(func() {
+		lock(&memlock)
+		if uintptr(v) == bloc {
+			// Address hint is the current end of memory,
+			// so try to extend the address space.
+			p = uintptr(sbrk(n))
+		}
+		if p == 0 && v == nil {
+			p = uintptr(memAlloc(n))
+			memCheck()
+		}
+		unlock(&memlock)
+	})
+	return unsafe.Pointer(p)
 }
 
 func sysReserveAlignedSbrk(size, align uintptr) (unsafe.Pointer, uintptr) {
-	lock(&memlock)
-	if p := memAllocNoGrow(size + align); p != nil {
-		// We can satisfy the reservation from the free list.
-		// Trim off the unaligned parts.
-		pAligned := alignUp(uintptr(p), align)
-		if startLen := pAligned - uintptr(p); startLen > 0 {
-			memFree(p, startLen)
+	var p uintptr
+	systemstack(func() {
+		lock(&memlock)
+		if base := memAllocNoGrow(size + align); base != nil {
+			// We can satisfy the reservation from the free list.
+			// Trim off the unaligned parts.
+			start := alignUp(uintptr(base), align)
+			if startLen := start - uintptr(base); startLen > 0 {
+				memFree(base, startLen)
+			}
+			end := start + size
+			if endLen := (uintptr(base) + size + align) - end; endLen > 0 {
+				memFree(unsafe.Pointer(end), endLen)
+			}
+			memCheck()
+			unlock(&memlock)
+			p = start
+			return
 		}
-		end := pAligned + size
-		if endLen := (uintptr(p) + size + align) - end; endLen > 0 {
-			memFree(unsafe.Pointer(end), endLen)
-		}
-		memCheck()
-		unlock(&memlock)
-		return unsafe.Pointer(pAligned), size
-	}
-
-	// Round up bloc to align, then allocate size.
-	p := alignUp(bloc, align)
-	r := sbrk(p + size - bloc)
-	if r == nil {
-		p, size = 0, 0
-	} else if l := p - uintptr(r); l > 0 {
-		// Free the area we skipped over for alignment.
-		memFree(r, l)
-		memCheck()
-	}
-	unlock(&memlock)
+
+		// Round up bloc to align, then allocate size.
+		p = alignUp(bloc, align)
+		r := sbrk(p + size - bloc)
+		if r == nil {
+			p, size = 0, 0
+		} else if l := p - uintptr(r); l > 0 {
+			// Free the area we skipped over for alignment.
+			memFree(r, l)
+			memCheck()
+		}
+		unlock(&memlock)
+	})
 	return unsafe.Pointer(p), size
 }
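The free-list path of sysReserveAlignedSbrk over-allocates size+align bytes, returns the aligned middle, and hands the head and tail fragments back via memFree, so head+size+tail always equals size+align and nothing leaks. That arithmetic, as a self-contained example with made-up numbers (alignUp matches the runtime helper of the same name):

package main

import "fmt"

// alignUp rounds p up to a multiple of align (a power of two).
func alignUp(p, align uintptr) uintptr {
	return (p + align - 1) &^ (align - 1)
}

func main() {
	const size, align uintptr = 0x5000, 0x4000
	base := uintptr(0x12345) // wherever the free list happened to place us

	start := alignUp(base, align)       // the aligned pointer returned
	head := start - base                // fragment freed before start
	end := start + size
	tail := (base + size + align) - end // fragment freed after end

	// head+size+tail == size+align, so the over-allocation is fully
	// accounted for.
	fmt.Printf("start=%#x head=%#x tail=%#x head+size+tail=%#x\n",
		start, head, tail, head+size+tail)
}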
