src/internal/profilerecord/profilerecord.go (21 additions, 0 deletions)
@@ -8,10 +8,16 @@
 // TODO: Consider moving this to internal/runtime, see golang.org/issue/65355.
 package profilerecord
 
+import "unsafe"
+
 type StackRecord struct {
 	Stack []uintptr
 }
 
+func (r StackRecord) GetStack() []uintptr           { return r.Stack }
+func (r StackRecord) GetLabels() unsafe.Pointer     { return nil }
+func (r StackRecord) GetGoroutine() GoroutineRecord { return GoroutineRecord{} }
+
 type MemProfileRecord struct {
 	AllocBytes, FreeBytes     int64
 	AllocObjects, FreeObjects int64
@@ -26,3 +32,18 @@ type BlockProfileRecord struct {
 	Cycles int64
 	Stack  []uintptr
 }
+
+type GoroutineRecord struct {
+	ID         uint64
+	State      uint32
+	WaitReason uint8
+	CreatorID  uint64
+	CreationPC uintptr
+	WaitSince  int64 // approx time when the g became blocked, in nanoseconds
+	Labels     unsafe.Pointer
+	Stack      []uintptr
+}
+
+func (r GoroutineRecord) GetStack() []uintptr           { return r.Stack }
+func (r GoroutineRecord) GetLabels() unsafe.Pointer     { return r.Labels }
+func (r GoroutineRecord) GetGoroutine() GoroutineRecord { return r }
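
Note: the identical accessor method sets on StackRecord and GoroutineRecord suggest the two record types are meant to be consumed by shared profile-encoding code through a common interface. A minimal standalone sketch of that idea follows; the record interface name and the trimmed type copies are assumptions for illustration, not part of this change.

package main

import (
	"fmt"
	"unsafe"
)

// Trimmed copies of the diff's record types so the sketch compiles on
// its own; the real definitions live in internal/profilerecord.
type GoroutineRecord struct {
	ID     uint64
	Labels unsafe.Pointer
	Stack  []uintptr
}

func (r GoroutineRecord) GetStack() []uintptr           { return r.Stack }
func (r GoroutineRecord) GetLabels() unsafe.Pointer     { return r.Labels }
func (r GoroutineRecord) GetGoroutine() GoroutineRecord { return r }

type StackRecord struct {
	Stack []uintptr
}

func (r StackRecord) GetStack() []uintptr           { return r.Stack }
func (r StackRecord) GetLabels() unsafe.Pointer     { return nil }
func (r StackRecord) GetGoroutine() GoroutineRecord { return GoroutineRecord{} }

// record is hypothetical: the diff does not declare an interface, but
// the shared method set implies consumers could be written against one.
type record interface {
	GetStack() []uintptr
	GetLabels() unsafe.Pointer
	GetGoroutine() GoroutineRecord
}

// stackDepth works uniformly for either record type.
func stackDepth(r record) int { return len(r.GetStack()) }

func main() {
	fmt.Println(stackDepth(StackRecord{Stack: []uintptr{0x1, 0x2}}))      // 2
	fmt.Println(stackDepth(GoroutineRecord{ID: 7, Stack: []uintptr{0x3}})) // 1
}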
src/net/http/pprof/pprof.go (1 addition, 1 deletion)
@@ -264,7 +264,7 @@ func (name handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		runtime.GC()
 	}
 	debug, _ := strconv.Atoi(r.FormValue("debug"))
-	if debug != 0 {
+	if debug != 0 && !(name == "goroutine" && debug == 3) {
 		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
 	} else {
 		w.Header().Set("Content-Type", "application/octet-stream")
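With this change, the goroutine endpoint treats debug=3 as a binary profile: the response is served as application/octet-stream rather than text/plain. A hedged client-side sketch follows; the port and output filename are assumptions, and it presumes a server that imports net/http/pprof and listens on :6060.

package main

import (
	"io"
	"net/http"
	"os"
)

func main() {
	// debug=3 now comes back as a binary payload, so save it to a file
	// for offline analysis instead of printing it to a terminal.
	resp, err := http.Get("http://localhost:6060/debug/pprof/goroutine?debug=3")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, err := os.Create("goroutine-debug3.pb") // assumed filename
	if err != nil {
		panic(err)
	}
	defer out.Close()

	if _, err := io.Copy(out, resp.Body); err != nil {
		panic(err)
	}
}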
src/runtime/mprof.go (32 additions, 47 deletions)
@@ -1245,40 +1245,21 @@ func pprof_threadCreateInternal(p []profilerecord.StackRecord) (n int, ok bool)
 	})
 }
 
-//go:linkname pprof_goroutineProfileWithLabels
-func pprof_goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
-	return goroutineProfileWithLabels(p, labels)
+//go:linkname pprof_goroutineProfile
+func pprof_goroutineProfile(p []profilerecord.GoroutineRecord) (n int, ok bool) {
+	return goroutineProfileInternal(p)
 }
 
-// labels may be nil. If labels is non-nil, it must have the same length as p.
-func goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
-	if labels != nil && len(labels) != len(p) {
-		labels = nil
-	}
-
-	return goroutineProfileWithLabelsConcurrent(p, labels)
-}
-
-//go:linkname pprof_goroutineLeakProfileWithLabels
-func pprof_goroutineLeakProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
-	return goroutineLeakProfileWithLabelsConcurrent(p, labels)
-}
-
-// labels may be nil. If labels is non-nil, it must have the same length as p.
-func goroutineLeakProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
-	if labels != nil && len(labels) != len(p) {
-		labels = nil
-	}
-
-	return goroutineLeakProfileWithLabelsConcurrent(p, labels)
+//go:linkname pprof_goroutineLeakProfile
+func pprof_goroutineLeakProfile(p []profilerecord.GoroutineRecord) (n int, ok bool) {
+	return goroutineLeakProfileInternal(p)
 }
 
 var goroutineProfile = struct {
 	sema    uint32
 	active  bool
 	offset  atomic.Int64
-	records []profilerecord.StackRecord
-	labels  []unsafe.Pointer
+	records []profilerecord.GoroutineRecord
 }{
 	sema: 1,
 }
@@ -1316,18 +1297,18 @@ func (p *goroutineProfileStateHolder) CompareAndSwap(old, new goroutineProfileSt
 	return (*atomic.Uint32)(p).CompareAndSwap(uint32(old), uint32(new))
 }
 
-func goroutineLeakProfileWithLabelsConcurrent(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
+func goroutineLeakProfileInternal(p []profilerecord.GoroutineRecord) (n int, ok bool) {
 	if len(p) == 0 {
 		// An empty slice is obviously too small. Return a rough
 		// allocation estimate.
 		return work.goroutineLeak.count, false
 	}
 
-	// Use the same semaphore as goroutineProfileWithLabelsConcurrent,
+	// Use the same semaphore as goroutineProfileInternal,
 	// because ultimately we still use goroutine profiles.
 	semacquire(&goroutineProfile.sema)
 
-	// Unlike in goroutineProfileWithLabelsConcurrent, we don't need to
+	// Unlike in goroutineProfileInternal, we don't need to
 	// save the current goroutine stack, because it is obviously not leaked.
 
 	pcbuf := makeProfStack() // see saveg() for explanation
@@ -1358,7 +1339,7 @@ func goroutineLeakProfileWithLabelsConcurrent(p []profilerecord.StackRecord, lab
 	return n, true
 }
 
-func goroutineProfileWithLabelsConcurrent(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
+func goroutineProfileInternal(p []profilerecord.GoroutineRecord) (n int, ok bool) {
 	if len(p) == 0 {
 		// An empty slice is obviously too small. Return a rough
 		// allocation estimate without bothering to STW. As long as
@@ -1401,9 +1382,15 @@ func goroutineProfileWithLabelsConcurrent(p []profilerecord.StackRecord, labels
 	systemstack(func() {
 		saveg(pc, sp, ourg, &p[0], pcbuf)
 	})
-	if labels != nil {
-		labels[0] = ourg.labels
-	}
+
+	p[0].ID = ourg.goid
+	p[0].CreatorID = ourg.parentGoid
+	p[0].CreationPC = ourg.gopc
+	p[0].Labels = ourg.labels
+	p[0].State = readgstatus(ourg) &^ _Gscan
+	p[0].WaitReason = uint8(ourg.waitreason)
+	p[0].WaitSince = ourg.waitsince
+
 	ourg.goroutineProfiled.Store(goroutineProfileSatisfied)
 	goroutineProfile.offset.Store(1)
 
@@ -1414,7 +1401,6 @@ func goroutineProfileWithLabelsConcurrent(p []profilerecord.StackRecord, labels
 	// field set to goroutineProfileSatisfied.
 	goroutineProfile.active = true
 	goroutineProfile.records = p
-	goroutineProfile.labels = labels
 	startTheWorld(stw)
 
 	// Visit each goroutine that existed as of the startTheWorld call above.
@@ -1436,7 +1422,6 @@ func goroutineProfileWithLabelsConcurrent(p []profilerecord.StackRecord, labels
 	endOffset := goroutineProfile.offset.Swap(0)
 	goroutineProfile.active = false
 	goroutineProfile.records = nil
-	goroutineProfile.labels = nil
 	startTheWorld(stw)
 
 	// Restore the invariant that every goroutine struct in allgs has its
@@ -1528,7 +1513,7 @@ func doRecordGoroutineProfile(gp1 *g, pcbuf []uintptr) {
 	// System goroutines should not appear in the profile.
 	// Check this here and not in tryRecordGoroutineProfile because isSystemGoroutine
 	// may change on a goroutine while it is executing, so while the scheduler might
-	// see a system goroutine, goroutineProfileWithLabelsConcurrent might not, and
+	// see a system goroutine, goroutineProfileInternal might not, and
 	// this inconsistency could cause invariants to be violated, such as trying to
 	// record the stack of a running goroutine below. In short, we still want system
 	// goroutines to participate in the same state machine on gp1.goroutineProfiled as
@@ -1545,7 +1530,7 @@ func doRecordGoroutineProfile(gp1 *g, pcbuf []uintptr) {
 	if offset >= len(goroutineProfile.records) {
 		// Should be impossible, but better to return a truncated profile than
 		// to crash the entire process at this point. Instead, deal with it in
-		// goroutineProfileWithLabelsConcurrent where we have more context.
+		// goroutineProfileInternal where we have more context.
 		return
 	}

Expand All @@ -1559,12 +1544,16 @@ func doRecordGoroutineProfile(gp1 *g, pcbuf []uintptr) {
// to avoid schedule delays.
systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &goroutineProfile.records[offset], pcbuf) })

if goroutineProfile.labels != nil {
goroutineProfile.labels[offset] = gp1.labels
}
goroutineProfile.records[offset].Labels = gp1.labels
goroutineProfile.records[offset].ID = gp1.goid
goroutineProfile.records[offset].CreatorID = gp1.parentGoid
goroutineProfile.records[offset].CreationPC = gp1.gopc
goroutineProfile.records[offset].State = readgstatus(gp1) &^ _Gscan
goroutineProfile.records[offset].WaitReason = uint8(gp1.waitreason)
goroutineProfile.records[offset].WaitSince = gp1.waitsince
}

func goroutineProfileWithLabelsSync(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
func goroutineProfileWithLabelsSync(p []profilerecord.GoroutineRecord, labels []unsafe.Pointer) (n int, ok bool) {
gp := getg()

isOK := func(gp1 *g) bool {
@@ -1650,7 +1639,7 @@ func goroutineProfileWithLabelsSync(p []profilerecord.StackRecord, labels []unsa
 // Most clients should use the [runtime/pprof] package instead
 // of calling GoroutineProfile directly.
 func GoroutineProfile(p []StackRecord) (n int, ok bool) {
-	records := make([]profilerecord.StackRecord, len(p))
+	records := make([]profilerecord.GoroutineRecord, len(p))
 	n, ok = goroutineProfileInternal(records)
 	if !ok {
 		return
@@ -1662,11 +1651,7 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) {
 	return
 }
 
-func goroutineProfileInternal(p []profilerecord.StackRecord) (n int, ok bool) {
-	return goroutineProfileWithLabels(p, nil)
-}
-
-func saveg(pc, sp uintptr, gp *g, r *profilerecord.StackRecord, pcbuf []uintptr) {
+func saveg(pc, sp uintptr, gp *g, r *profilerecord.GoroutineRecord, pcbuf []uintptr) {
 	// To reduce memory usage, we want to allocate a r.Stack that is just big
 	// enough to hold gp's stack trace. Naively we might achieve this by
 	// recording our stack trace into mp.profStack, and then allocating a
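
The net effect in mprof.go is that the stop-the-world profile passes now write goroutine metadata (ID, creator, creation PC, state, wait reason, labels) directly into each profilerecord.GoroutineRecord, replacing the parallel labels slice, while the exported runtime.GoroutineProfile keeps its existing signature and copies only stacks back out. A small sketch of that unchanged public API, using the standard two-call sizing pattern:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Two-call pattern: an empty slice just yields the goroutine count,
	// then a second call with some headroom fills the records.
	n, _ := runtime.GoroutineProfile(nil)
	records := make([]runtime.StackRecord, n+10)
	n, ok := runtime.GoroutineProfile(records)
	if !ok {
		fmt.Println("more goroutines appeared; retry with a larger slice")
		return
	}
	for i := range records[:n] {
		fmt.Printf("goroutine with %d frames\n", len(records[i].Stack()))
	}
}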