@@ -19,6 +19,7 @@ import (
1919 "github.com/cockroachdb/cockroach/pkg/util/log"
2020 "github.com/cockroachdb/cockroach/pkg/util/timeutil"
2121 "github.com/cockroachdb/cockroach/pkg/util/tracing"
22+ "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb"
2223)
2324
2425// ConstructTracingAggregatorProducerMeta constructs a ProducerMetadata that
@@ -45,25 +46,37 @@ func ConstructTracingAggregatorProducerMeta(
 		}
 	})

+	sp := tracing.SpanFromContext(ctx)
+	if sp != nil {
+		recType := sp.RecordingType()
+		if recType != tracingpb.RecordingOff {
+			aggEvents.SpanTotals = sp.GetFullTraceRecording(recType).Root.ChildrenMetadata
+		}
+	}
 	return &execinfrapb.ProducerMetadata{AggregatorEvents: aggEvents}
 }

 // ComponentAggregatorStats is a mapping from a component to all the Aggregator
 // Stats collected for that component.
-type ComponentAggregatorStats map[execinfrapb.ComponentID]map[string][]byte
+type ComponentAggregatorStats map[execinfrapb.ComponentID]execinfrapb.TracingAggregatorEvents

 // DeepCopy takes a deep copy of the component aggregator stats map.
 func (c ComponentAggregatorStats) DeepCopy() ComponentAggregatorStats {
 	mapCopy := make(ComponentAggregatorStats, len(c))
 	for k, v := range c {
-		innerMap := make(map[string][]byte, len(v))
-		for k2, v2 := range v {
+		copied := v
+		copied.Events = make(map[string][]byte, len(v.Events))
+		copied.SpanTotals = make(map[string]tracingpb.OperationMetadata, len(v.SpanTotals))
+		for k2, v2 := range v.Events {
 			// Create a copy of the byte slice to avoid modifying the original data.
 			dataCopy := make([]byte, len(v2))
 			copy(dataCopy, v2)
-			innerMap[k2] = dataCopy
+			copied.Events[k2] = dataCopy
 		}
-		mapCopy[k] = innerMap
+		for k2, v2 := range v.SpanTotals {
+			copied.SpanTotals[k2] = v2
+		}
+		mapCopy[k] = copied
 	}
 	return mapCopy
 }
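// Illustrative sketch, not part of this diff: a test-style check of the DeepCopy
// contract above, i.e. that mutating the copy's Events bytes or SpanTotals map
// never leaks back into the original. It assumes it lives in the same package as
// the changed file (plus the standard "errors" import); the helper name and the
// "hypothetical-op" key are made up.
func verifyDeepCopyIsolation(orig ComponentAggregatorStats) error {
	cp := orig.DeepCopy()
	for component, stats := range cp {
		for name, data := range stats.Events {
			if len(data) == 0 {
				continue
			}
			data[0]++ // mutate the copy's bytes only
			if orig[component].Events[name][0] == data[0] {
				return errors.New("Events bytes are shared with the original")
			}
		}
		// Growing the copy's SpanTotals must not grow the original map.
		before := len(orig[component].SpanTotals)
		stats.SpanTotals["hypothetical-op"] = tracingpb.OperationMetadata{Count: 1}
		if len(orig[component].SpanTotals) != before {
			return errors.New("SpanTotals map is shared with the original")
		}
	}
	return nil
}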
@@ -84,13 +97,17 @@ func FlushTracingAggregatorStats(
 ) error {
 	return db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error {
 		clusterWideAggregatorStats := make(map[string]tracing.AggregatorEvent)
+		clusterWideOpMetadata := make(map[string]tracingpb.OperationMetadata)
 		asOf := timeutil.Now().Format("20060102_150405.00")

 		var clusterWideSummary bytes.Buffer
 		for component, nameToEvent := range perNodeAggregatorStats {
-			clusterWideSummary.WriteString(fmt.Sprintf("## SQL Instance ID: %s; Flow ID: %s\n\n",
+			clusterWideSummary.WriteString(fmt.Sprintf("## SQL Instance ID: %s; Flow ID: %s\n",
 				component.SQLInstanceID.String(), component.FlowID.String()))
-			for name, event := range nameToEvent {
+
+			clusterWideSummary.WriteString("### aggregated events\n\n")
+
+			for name, event := range nameToEvent.Events {
 				// Write a proto file per tag. This machine-readable file can be consumed
 				// by other places we want to display this information egs: annotated
 				// DistSQL diagrams, DBConsole etc.
@@ -122,6 +139,13 @@ func FlushTracingAggregatorStats(
 					clusterWideAggregatorStats[name] = aggEvent
 				}
 			}
+
+			clusterWideSummary.WriteString("### span metadata\n\n")
+
+			for name, metadata := range nameToEvent.SpanTotals {
+				fmt.Fprintf(&clusterWideSummary, " - %s (%d): %s\n", name, metadata.Count, metadata.Duration)
+				clusterWideOpMetadata[name] = clusterWideOpMetadata[name].Combine(metadata)
+			}
 		}

 		for tag, event := range clusterWideAggregatorStats {
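// Illustrative sketch, not part of this diff: the cluster-wide rollup that
// FlushTracingAggregatorStats performs inline above, pulled out as a standalone
// helper to show the OperationMetadata.Combine pattern. The helper name is made
// up; it assumes the same package and imports as the changed file.
func rollUpSpanTotals(perNode ComponentAggregatorStats) map[string]tracingpb.OperationMetadata {
	totals := make(map[string]tracingpb.OperationMetadata)
	for _, stats := range perNode {
		for op, md := range stats.SpanTotals {
			// Combine folds per-node metadata for the same operation name; the zero
			// value acts as the identity for the first occurrence, exactly as the
			// clusterWideOpMetadata map is built in the diff above.
			totals[op] = totals[op].Combine(md)
		}
	}
	return totals
}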