9
9
"bytes"
10
10
"cmp"
11
11
"encoding/binary"
12
+ "errors"
12
13
"fmt"
13
14
"io"
14
15
"slices"
@@ -32,22 +33,102 @@ type generation struct {
32
33
* evTable
33
34
}
34
35
36
+ // readGeneration buffers and decodes the structural elements of a trace generation
37
+ // out of r.
38
+ func readGeneration (r * bufio.Reader , ver version.Version ) (* generation , error ) {
39
+ if ver < version .Go126 {
40
+ return nil , errors .New ("internal error: readGeneration called for <1.26 trace" )
41
+ }
42
+ g := & generation {
43
+ evTable : & evTable {
44
+ pcs : make (map [uint64 ]frame ),
45
+ },
46
+ batches : make (map [ThreadID ][]batch ),
47
+ }
48
+
49
+ // Read batches one at a time until we either hit the next generation.
50
+ for {
51
+ b , gen , err := readBatch (r )
52
+ if err == io .EOF {
53
+ if len (g .batches ) != 0 {
54
+ return nil , errors .New ("incomplete generation found; trace likely truncated" )
55
+ }
56
+ return nil , nil // All done.
57
+ }
58
+ if err != nil {
59
+ return nil , err
60
+ }
61
+ if g .gen == 0 {
62
+ // Initialize gen.
63
+ g .gen = gen
64
+ }
65
+ if b .isEndOfGeneration () {
66
+ break
67
+ }
68
+ if gen == 0 {
69
+ // 0 is a sentinel used by the runtime, so we'll never see it.
70
+ return nil , fmt .Errorf ("invalid generation number %d" , gen )
71
+ }
72
+ if gen != g .gen {
73
+ return nil , fmt .Errorf ("broken trace: missing end-of-generation event, or generations are interleaved" )
74
+ }
75
+ if g .minTs == 0 || b .time < g .minTs {
76
+ g .minTs = b .time
77
+ }
78
+ if err := processBatch (g , b , ver ); err != nil {
79
+ return nil , err
80
+ }
81
+ }
82
+
83
+ // Check some invariants.
84
+ if g .freq == 0 {
85
+ return nil , fmt .Errorf ("no frequency event found" )
86
+ }
87
+ if ! g .hasClockSnapshot {
88
+ return nil , fmt .Errorf ("no clock snapshot event found" )
89
+ }
90
+
91
+ // N.B. Trust that the batch order is correct. We can't validate the batch order
92
+ // by timestamp because the timestamps could just be plain wrong. The source of
93
+ // truth is the order things appear in the trace and the partial order sequence
94
+ // numbers on certain events. If it turns out the batch order is actually incorrect
95
+ // we'll very likely fail to advance a partial order from the frontier.
96
+
97
+ // Compactify stacks and strings for better lookup performance later.
98
+ g .stacks .compactify ()
99
+ g .strings .compactify ()
100
+
101
+ // Validate stacks.
102
+ if err := validateStackStrings (& g .stacks , & g .strings , g .pcs ); err != nil {
103
+ return nil , err
104
+ }
105
+
106
+ // Now that we have the frequency, fix up CPU samples.
107
+ fixUpCPUSamples (g .cpuSamples , g .freq )
108
+ return g , nil
109
+ }
110
+
35
111
// spilledBatch represents a batch that was read out for the next generation,
// while reading the previous one. It's passed on when parsing the next
// generation.
//
// Used only for trace versions < Go126.
type spilledBatch struct {
	gen uint64 // Generation number the spilled batch belongs to.
	*batch
}
42
120
43
- // readGeneration buffers and decodes the structural elements of a trace generation
121
+ // readGenerationWithSpill buffers and decodes the structural elements of a trace generation
44
122
// out of r. spill is the first batch of the new generation (already buffered and
45
123
// parsed from reading the last generation). Returns the generation and the first
46
124
// batch read of the next generation, if any.
47
125
//
48
126
// If gen is non-nil, it is valid and must be processed before handling the returned
49
127
// error.
50
- func readGeneration (r * bufio.Reader , spill * spilledBatch , ver version.Version ) (* generation , * spilledBatch , error ) {
128
+ func readGenerationWithSpill (r * bufio.Reader , spill * spilledBatch , ver version.Version ) (* generation , * spilledBatch , error ) {
129
+ if ver >= version .Go126 {
130
+ return nil , nil , errors .New ("internal error: readGenerationWithSpill called for Go 1.26+ trace" )
131
+ }
51
132
g := & generation {
52
133
evTable : & evTable {
53
134
pcs : make (map [uint64 ]frame ),
@@ -56,15 +137,15 @@ func readGeneration(r *bufio.Reader, spill *spilledBatch, ver version.Version) (
56
137
}
57
138
// Process the spilled batch.
58
139
if spill != nil {
140
+ // Process the spilled batch, which contains real data.
59
141
g .gen = spill .gen
60
142
g .minTs = spill .batch .time
61
143
if err := processBatch (g , * spill .batch , ver ); err != nil {
62
144
return nil , nil , err
63
145
}
64
146
spill = nil
65
147
}
66
- // Read batches one at a time until we either hit EOF or
67
- // the next generation.
148
+ // Read batches one at a time until we either hit the next generation.
68
149
var spillErr error
69
150
for {
70
151
b , gen , err := readBatch (r )
@@ -73,7 +154,7 @@ func readGeneration(r *bufio.Reader, spill *spilledBatch, ver version.Version) (
73
154
}
74
155
if err != nil {
75
156
if g .gen != 0 {
76
- // This is an error reading the first batch of the next generation.
157
+ // This may be an error reading the first batch of the next generation.
77
158
// This is fine. Let's forge ahead assuming that what we've got so
78
159
// far is fine.
79
160
spillErr = err
@@ -89,7 +170,8 @@ func readGeneration(r *bufio.Reader, spill *spilledBatch, ver version.Version) (
89
170
// Initialize gen.
90
171
g .gen = gen
91
172
}
92
- if gen == g .gen + 1 { // TODO: advance this the same way the runtime does.
173
+ if gen == g .gen + 1 {
174
+ // TODO: Increment the generation with wraparound the same way the runtime does.
93
175
spill = & spilledBatch {gen : gen , batch : & b }
94
176
break
95
177
}
@@ -134,15 +216,8 @@ func readGeneration(r *bufio.Reader, spill *spilledBatch, ver version.Version) (
134
216
return nil , nil , err
135
217
}
136
218
137
- // Fix up the CPU sample timestamps, now that we have freq.
138
- for i := range g .cpuSamples {
139
- s := & g .cpuSamples [i ]
140
- s .time = g .freq .mul (timestamp (s .time ))
141
- }
142
- // Sort the CPU samples.
143
- slices .SortFunc (g .cpuSamples , func (a , b cpuSample ) int {
144
- return cmp .Compare (a .time , b .time )
145
- })
219
+ // Now that we have the frequency, fix up CPU samples.
220
+ fixUpCPUSamples (g .cpuSamples , g .freq )
146
221
return g , spill , spillErr
147
222
}
148
223
@@ -174,6 +249,8 @@ func processBatch(g *generation, b batch, ver version.Version) error {
174
249
if err := addExperimentalBatch (g .expBatches , b ); err != nil {
175
250
return err
176
251
}
252
+ case b .isEndOfGeneration ():
253
+ return errors .New ("internal error: unexpectedly processing EndOfGeneration; broken trace?" )
177
254
default :
178
255
if _ , ok := g .batches [b .m ]; ! ok {
179
256
g .batchMs = append (g .batchMs , b .m )
@@ -512,3 +589,15 @@ func addExperimentalBatch(expBatches map[tracev2.Experiment][]ExperimentalBatch,
512
589
})
513
590
return nil
514
591
}
592
+
593
+ func fixUpCPUSamples (samples []cpuSample , freq frequency ) {
594
+ // Fix up the CPU sample timestamps.
595
+ for i := range samples {
596
+ s := & samples [i ]
597
+ s .time = freq .mul (timestamp (s .time ))
598
+ }
599
+ // Sort the CPU samples.
600
+ slices .SortFunc (samples , func (a , b cpuSample ) int {
601
+ return cmp .Compare (a .time , b .time )
602
+ })
603
+ }
0 commit comments