Commit 618c73a

craig[bot], Eric Harmeling (ericharmeling), Rafi Shamim (rafiss), and Jackson Owens (jbowens)
committed
107388: metrics: refactor histogram bucket generation and testing r=ericharmeling a=ericharmeling

This commit refactors histogram bucketing for legibility and composability. It also introduces a data-driven test for histogram bucket generation. This refactor should make it easier to add additional metric categories, distributions, and bucket types.

Part of cockroachdb#97144.

Release note: None

108415: roachtest: deflake npgsql test r=rafiss a=rafiss

These upstream tests are flaky, so we ignore them.

Informs cockroachdb#108414.
Fixes cockroachdb#108044.
Fixes cockroachdb#108504.

Release note: None

108535: roachtest: remove rangeTs variants of import-cancellation test r=stevendanna a=jbowens

Previously, the import-cancellation roachtest was split into two variants: one with MVCC range tombstones enabled and one without. MVCC range tombstones are always enabled now, so the two variants were effectively identical. This commit consolidates them into a single `import-cancellation` roachtest.

Informs cockroachdb#97869.

Epic: None

Release note: None

Co-authored-by: Eric Harmeling <[email protected]>
Co-authored-by: Rafi Shamim <[email protected]>
Co-authored-by: Jackson Owens <[email protected]>
4 parents: 8ca9b05 + c748c25 + 4cf794e + 0946522 · commit 618c73a
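The thread running through the metrics diffs below: metric.HistogramOptions now takes its bucket definition through a BucketConfig field instead of Buckets, with the named configurations (metric.IOLatencyBuckets, metric.BatchProcessLatencyBuckets, metric.DataSize16MBBuckets, metric.MemoryUsage64MBBuckets, metric.Count1KBuckets) supplied as bucket configs. A minimal sketch of the post-refactor construction pattern, lifted from the diffs below; the metric name and registration wiring are hypothetical stand-ins:

    package example

    import (
        "time"

        "github.com/cockroachdb/cockroach/pkg/util/metric"
    )

    // registerExampleMetrics sketches the post-refactor pattern: the bucket
    // configuration is supplied via BucketConfig (previously Buckets).
    func registerExampleMetrics(window time.Duration) {
        meta := metric.Metadata{Name: "example.latency"} // hypothetical metadata
        hist := metric.NewHistogram(metric.HistogramOptions{
            Mode:         metric.HistogramModePrometheus,
            Metadata:     meta,
            Duration:     window,
            BucketConfig: metric.IOLatencyBuckets,
        })
        _ = hist // registration with a metric registry elided
    }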


43 files changed: +757 −703 lines

pkg/ccl/changefeedccl/metrics.go

Lines changed: 43 additions & 43 deletions
@@ -552,52 +552,52 @@ func newAggregateMetrics(histogramWindow time.Duration) *AggMetrics {
 		EmittedMessages:  b.Counter(metaChangefeedEmittedMessages),
 		FilteredMessages: b.Counter(metaChangefeedFilteredMessages),
 		MessageSize: b.Histogram(metric.HistogramOptions{
-			Metadata: metaMessageSize,
-			Duration: histogramWindow,
-			MaxVal:   10 << 20, /* 10MB max message size */
-			SigFigs:  1,
-			Buckets:  metric.DataSize16MBBuckets,
+			Metadata:     metaMessageSize,
+			Duration:     histogramWindow,
+			MaxVal:       10 << 20, /* 10MB max message size */
+			SigFigs:      1,
+			BucketConfig: metric.DataSize16MBBuckets,
 		}),
 		EmittedBytes:     b.Counter(metaChangefeedEmittedBytes),
 		FlushedBytes:     b.Counter(metaChangefeedFlushedBytes),
 		Flushes:          b.Counter(metaChangefeedFlushes),
 		SizeBasedFlushes: b.Counter(metaSizeBasedFlushes),
 		ParallelIOQueueNanos: b.Histogram(metric.HistogramOptions{
-			Metadata: metaChangefeedParallelIOQueueNanos,
-			Duration: histogramWindow,
-			MaxVal:   changefeedIOQueueMaxLatency.Nanoseconds(),
-			SigFigs:  2,
-			Buckets:  metric.BatchProcessLatencyBuckets,
+			Metadata:     metaChangefeedParallelIOQueueNanos,
+			Duration:     histogramWindow,
+			MaxVal:       changefeedIOQueueMaxLatency.Nanoseconds(),
+			SigFigs:      2,
+			BucketConfig: metric.BatchProcessLatencyBuckets,
 		}),
 		SinkIOInflight: b.Gauge(metaChangefeedSinkIOInflight),

 		BatchHistNanos: b.Histogram(metric.HistogramOptions{
-			Metadata: metaChangefeedBatchHistNanos,
-			Duration: histogramWindow,
-			MaxVal:   changefeedBatchHistMaxLatency.Nanoseconds(),
-			SigFigs:  1,
-			Buckets:  metric.BatchProcessLatencyBuckets,
+			Metadata:     metaChangefeedBatchHistNanos,
+			Duration:     histogramWindow,
+			MaxVal:       changefeedBatchHistMaxLatency.Nanoseconds(),
+			SigFigs:      1,
+			BucketConfig: metric.BatchProcessLatencyBuckets,
 		}),
 		FlushHistNanos: b.Histogram(metric.HistogramOptions{
-			Metadata: metaChangefeedFlushHistNanos,
-			Duration: histogramWindow,
-			MaxVal:   changefeedFlushHistMaxLatency.Nanoseconds(),
-			SigFigs:  2,
-			Buckets:  metric.BatchProcessLatencyBuckets,
+			Metadata:     metaChangefeedFlushHistNanos,
+			Duration:     histogramWindow,
+			MaxVal:       changefeedFlushHistMaxLatency.Nanoseconds(),
+			SigFigs:      2,
+			BucketConfig: metric.BatchProcessLatencyBuckets,
 		}),
 		CommitLatency: b.Histogram(metric.HistogramOptions{
-			Metadata: metaCommitLatency,
-			Duration: histogramWindow,
-			MaxVal:   commitLatencyMaxValue.Nanoseconds(),
-			SigFigs:  1,
-			Buckets:  metric.BatchProcessLatencyBuckets,
+			Metadata:     metaCommitLatency,
+			Duration:     histogramWindow,
+			MaxVal:       commitLatencyMaxValue.Nanoseconds(),
+			SigFigs:      1,
+			BucketConfig: metric.BatchProcessLatencyBuckets,
 		}),
 		AdmitLatency: b.Histogram(metric.HistogramOptions{
-			Metadata: metaAdmitLatency,
-			Duration: histogramWindow,
-			MaxVal:   admitLatencyMaxValue.Nanoseconds(),
-			SigFigs:  1,
-			Buckets:  metric.BatchProcessLatencyBuckets,
+			Metadata:     metaAdmitLatency,
+			Duration:     histogramWindow,
+			MaxVal:       admitLatencyMaxValue.Nanoseconds(),
+			SigFigs:      1,
+			BucketConfig: metric.BatchProcessLatencyBuckets,
 		}),
 		BackfillCount:         b.Gauge(metaChangefeedBackfillCount),
 		BackfillPendingRanges: b.Gauge(metaChangefeedBackfillPendingRanges),
@@ -712,27 +712,27 @@ func MakeMetrics(histogramWindow time.Duration) metric.Struct {
 		Failures:       metric.NewCounter(metaChangefeedFailures),
 		QueueTimeNanos: metric.NewCounter(metaEventQueueTime),
 		CheckpointHistNanos: metric.NewHistogram(metric.HistogramOptions{
-			Metadata: metaChangefeedCheckpointHistNanos,
-			Duration: histogramWindow,
-			MaxVal:   changefeedCheckpointHistMaxLatency.Nanoseconds(),
-			SigFigs:  2,
-			Buckets:  metric.IOLatencyBuckets,
+			Metadata:     metaChangefeedCheckpointHistNanos,
+			Duration:     histogramWindow,
+			MaxVal:       changefeedCheckpointHistMaxLatency.Nanoseconds(),
+			SigFigs:      2,
+			BucketConfig: metric.IOLatencyBuckets,
 		}),
 		FrontierUpdates: metric.NewCounter(metaChangefeedFrontierUpdates),
 		ThrottleMetrics: cdcutils.MakeMetrics(histogramWindow),
 		// Below two metrics were never implemented using the hdr histogram. Set ForceUsePrometheus
 		// to true.
 		ParallelConsumerFlushNanos: metric.NewHistogram(metric.HistogramOptions{
-			Metadata: metaChangefeedEventConsumerFlushNanos,
-			Duration: histogramWindow,
-			Buckets:  metric.IOLatencyBuckets,
-			Mode:     metric.HistogramModePrometheus,
+			Metadata:     metaChangefeedEventConsumerFlushNanos,
+			Duration:     histogramWindow,
+			BucketConfig: metric.IOLatencyBuckets,
+			Mode:         metric.HistogramModePrometheus,
 		}),
 		ParallelConsumerConsumeNanos: metric.NewHistogram(metric.HistogramOptions{
-			Metadata: metaChangefeedEventConsumerConsumeNanos,
-			Duration: histogramWindow,
-			Buckets:  metric.IOLatencyBuckets,
-			Mode:     metric.HistogramModePrometheus,
+			Metadata:     metaChangefeedEventConsumerConsumeNanos,
+			Duration:     histogramWindow,
+			BucketConfig: metric.IOLatencyBuckets,
+			Mode:         metric.HistogramModePrometheus,
 		}),
 		ParallelConsumerInFlightEvents: metric.NewGauge(metaChangefeedEventConsumerInFlightEvents),
 	}
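The data-driven test mentioned in the commit message is not part of this excerpt. As a rough illustration only, a bucket-generation test in CockroachDB style would typically drive the generator from a testdata file via the github.com/cockroachdb/datadriven package; the directive name, the generateBuckets helper, and the testdata path below are all hypothetical, not the test added in cockroachdb#107388:

    package bucketgen

    import (
        "fmt"
        "strings"
        "testing"

        "github.com/cockroachdb/datadriven"
    )

    // generateBuckets is a hypothetical stand-in for the real bucket-generation
    // code under pkg/util/metric.
    func generateBuckets(category string) []float64 {
        switch category {
        case "io-latency":
            return []float64{0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1}
        default:
            return nil
        }
    }

    func TestHistogramBuckets(t *testing.T) {
        // Each directive in the testdata file names a bucket category; the test
        // echoes the generated boundaries so the expected output lives in the
        // testdata file rather than in Go assertions.
        datadriven.RunTest(t, "testdata/histogram_buckets",
            func(t *testing.T, d *datadriven.TestData) string {
                switch d.Cmd {
                case "buckets":
                    var category string
                    d.ScanArgs(t, "category", &category)
                    var sb strings.Builder
                    for _, b := range generateBuckets(category) {
                        fmt.Fprintf(&sb, "%v\n", b)
                    }
                    return sb.String()
                default:
                    d.Fatalf(t, "unknown directive %q", d.Cmd)
                    return ""
                }
            })
    }

With this shape, adding a new distribution or bucket type is a testdata change paired with a generator change, rather than new assertion code.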

pkg/ccl/sqlproxyccl/connector_test.go

Lines changed: 12 additions & 12 deletions
@@ -381,10 +381,10 @@ func TestConnector_dialTenantCluster(t *testing.T) {
 		c := &connector{
 			TenantID: roachpb.MustMakeTenantID(42),
 			DialTenantLatency: metric.NewHistogram(metric.HistogramOptions{
-				Mode:     metric.HistogramModePrometheus,
-				Metadata: metaDialTenantLatency,
-				Duration: time.Millisecond,
-				Buckets:  metric.IOLatencyBuckets,
+				Mode:         metric.HistogramModePrometheus,
+				Metadata:     metaDialTenantLatency,
+				Duration:     time.Millisecond,
+				BucketConfig: metric.IOLatencyBuckets,
 			}),
 			DialTenantRetries: metric.NewCounter(metaDialTenantRetries),
 		}
@@ -466,10 +466,10 @@ func TestConnector_dialTenantCluster(t *testing.T) {

 		c := &connector{
 			DialTenantLatency: metric.NewHistogram(metric.HistogramOptions{
-				Mode:     metric.HistogramModePreferHdrLatency,
-				Metadata: metaDialTenantLatency,
-				Duration: time.Millisecond,
-				Buckets:  metric.IOLatencyBuckets,
+				Mode:         metric.HistogramModePreferHdrLatency,
+				Metadata:     metaDialTenantLatency,
+				Duration:     time.Millisecond,
+				BucketConfig: metric.IOLatencyBuckets,
 			}),
 			DialTenantRetries: metric.NewCounter(metaDialTenantRetries),
 		}
@@ -500,10 +500,10 @@ func TestConnector_dialTenantCluster(t *testing.T) {
 		c := &connector{
 			TenantID: roachpb.MustMakeTenantID(42),
 			DialTenantLatency: metric.NewHistogram(metric.HistogramOptions{
-				Mode:     metric.HistogramModePreferHdrLatency,
-				Metadata: metaDialTenantLatency,
-				Duration: time.Millisecond,
-				Buckets:  metric.IOLatencyBuckets,
+				Mode:         metric.HistogramModePreferHdrLatency,
+				Metadata:     metaDialTenantLatency,
+				Duration:     time.Millisecond,
+				BucketConfig: metric.IOLatencyBuckets,
 			}),
 			DialTenantRetries: metric.NewCounter(metaDialTenantRetries),
 		}

pkg/ccl/sqlproxyccl/metrics.go

Lines changed: 17 additions & 17 deletions
@@ -234,19 +234,19 @@ func makeProxyMetrics() metrics {
 		RefusedConnCount:    metric.NewCounter(metaRefusedConnCount),
 		SuccessfulConnCount: metric.NewCounter(metaSuccessfulConnCount),
 		ConnectionLatency: metric.NewHistogram(metric.HistogramOptions{
-			Mode:     metric.HistogramModePreferHdrLatency,
-			Metadata: metaConnMigrationAttemptedCount,
-			Duration: base.DefaultHistogramWindowInterval(),
-			Buckets:  metric.IOLatencyBuckets,
+			Mode:         metric.HistogramModePreferHdrLatency,
+			Metadata:     metaConnMigrationAttemptedCount,
+			Duration:     base.DefaultHistogramWindowInterval(),
+			BucketConfig: metric.IOLatencyBuckets,
 		}),
 		AuthFailedCount:        metric.NewCounter(metaAuthFailedCount),
 		ExpiredClientConnCount: metric.NewCounter(metaExpiredClientConnCount),
 		// Connector metrics.
 		DialTenantLatency: metric.NewHistogram(metric.HistogramOptions{
-			Mode:     metric.HistogramModePreferHdrLatency,
-			Metadata: metaDialTenantLatency,
-			Duration: base.DefaultHistogramWindowInterval(),
-			Buckets:  metric.IOLatencyBuckets},
+			Mode:         metric.HistogramModePreferHdrLatency,
+			Metadata:     metaDialTenantLatency,
+			Duration:     base.DefaultHistogramWindowInterval(),
+			BucketConfig: metric.IOLatencyBuckets},
 		),
 		DialTenantRetries: metric.NewCounter(metaDialTenantRetries),
 		// Connection migration metrics.
@@ -255,17 +255,17 @@ func makeProxyMetrics() metrics {
 		ConnMigrationErrorRecoverableCount: metric.NewCounter(metaConnMigrationErrorRecoverableCount),
 		ConnMigrationAttemptedCount:        metric.NewCounter(metaConnMigrationAttemptedCount),
 		ConnMigrationAttemptedLatency: metric.NewHistogram(metric.HistogramOptions{
-			Mode:     metric.HistogramModePreferHdrLatency,
-			Metadata: metaConnMigrationAttemptedLatency,
-			Duration: base.DefaultHistogramWindowInterval(),
-			Buckets:  metric.IOLatencyBuckets,
+			Mode:         metric.HistogramModePreferHdrLatency,
+			Metadata:     metaConnMigrationAttemptedLatency,
+			Duration:     base.DefaultHistogramWindowInterval(),
+			BucketConfig: metric.IOLatencyBuckets,
 		}),
 		ConnMigrationTransferResponseMessageSize: metric.NewHistogram(metric.HistogramOptions{
-			Metadata: metaConnMigrationTransferResponseMessageSize,
-			Duration: base.DefaultHistogramWindowInterval(),
-			Buckets:  metric.DataSize16MBBuckets,
-			MaxVal:   maxExpectedTransferResponseMessageSize,
-			SigFigs:  1,
+			Metadata:     metaConnMigrationTransferResponseMessageSize,
+			Duration:     base.DefaultHistogramWindowInterval(),
+			BucketConfig: metric.DataSize16MBBuckets,
+			MaxVal:       maxExpectedTransferResponseMessageSize,
+			SigFigs:      1,
 		}),
 		QueryCancelReceivedPGWire: metric.NewCounter(metaQueryCancelReceivedPGWire),
 		QueryCancelReceivedHTTP:   metric.NewCounter(metaQueryCancelReceivedHTTP),

pkg/ccl/streamingccl/streamingest/metrics.go

Lines changed: 15 additions & 15 deletions
@@ -171,25 +171,25 @@ func MakeMetrics(histogramWindow time.Duration) metric.Struct {
 		JobProgressUpdates: metric.NewCounter(metaJobProgressUpdates),
 		ReplanCount:        metric.NewCounter(metaDistSQLReplanCount),
 		FlushHistNanos: metric.NewHistogram(metric.HistogramOptions{
-			Metadata: metaReplicationFlushHistNanos,
-			Duration: histogramWindow,
-			Buckets:  metric.BatchProcessLatencyBuckets,
-			MaxVal:   streamingFlushHistMaxLatency.Nanoseconds(),
-			SigFigs:  1,
+			Metadata:     metaReplicationFlushHistNanos,
+			Duration:     histogramWindow,
+			BucketConfig: metric.BatchProcessLatencyBuckets,
+			MaxVal:       streamingFlushHistMaxLatency.Nanoseconds(),
+			SigFigs:      1,
 		}),
 		CommitLatency: metric.NewHistogram(metric.HistogramOptions{
-			Metadata: metaReplicationCommitLatency,
-			Duration: histogramWindow,
-			Buckets:  metric.BatchProcessLatencyBuckets,
-			MaxVal:   streamingCommitLatencyMaxValue.Nanoseconds(),
-			SigFigs:  1,
+			Metadata:     metaReplicationCommitLatency,
+			Duration:     histogramWindow,
+			BucketConfig: metric.BatchProcessLatencyBuckets,
+			MaxVal:       streamingCommitLatencyMaxValue.Nanoseconds(),
+			SigFigs:      1,
 		}),
 		AdmitLatency: metric.NewHistogram(metric.HistogramOptions{
-			Metadata: metaReplicationAdmitLatency,
-			Duration: histogramWindow,
-			Buckets:  metric.BatchProcessLatencyBuckets,
-			MaxVal:   streamingAdmitLatencyMaxValue.Nanoseconds(),
-			SigFigs:  1,
+			Metadata:     metaReplicationAdmitLatency,
+			Duration:     histogramWindow,
+			BucketConfig: metric.BatchProcessLatencyBuckets,
+			MaxVal:       streamingAdmitLatencyMaxValue.Nanoseconds(),
+			SigFigs:      1,
 		}),
 		RunningCount:               metric.NewGauge(metaStreamsRunning),
 		EarliestDataCheckpointSpan: metric.NewGauge(metaEarliestDataCheckpointSpan),

pkg/cmd/roachtest/tests/import_cancellation.go

Lines changed: 15 additions & 28 deletions
@@ -29,27 +29,23 @@ import (
 )

 func registerImportCancellation(r registry.Registry) {
-	for _, rangeTombstones := range []bool{true, false} {
-		r.Add(registry.TestSpec{
-			Name:      fmt.Sprintf(`import-cancellation/rangeTs=%t`, rangeTombstones),
-			Owner:     registry.OwnerDisasterRecovery,
-			Benchmark: true,
-			Timeout:   4 * time.Hour,
-			Cluster:   r.MakeClusterSpec(6, spec.CPU(32)),
-			Leases:    registry.MetamorphicLeases,
-			Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
-				if c.Spec().Cloud != spec.GCE {
-					t.Skip("uses gs://cockroach-fixtures; see https://github.com/cockroachdb/cockroach/issues/105968")
-				}
-				runImportCancellation(ctx, t, c, rangeTombstones)
-			},
-		})
-	}
+	r.Add(registry.TestSpec{
+		Name:      `import-cancellation`,
+		Owner:     registry.OwnerDisasterRecovery,
+		Benchmark: true,
+		Timeout:   4 * time.Hour,
+		Cluster:   r.MakeClusterSpec(6, spec.CPU(32)),
+		Leases:    registry.MetamorphicLeases,
+		Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
+			if c.Spec().Cloud != spec.GCE {
+				t.Skip("uses gs://cockroach-fixtures; see https://github.com/cockroachdb/cockroach/issues/105968")
+			}
+			runImportCancellation(ctx, t, c)
+		},
+	})
 }

-func runImportCancellation(
-	ctx context.Context, t test.Test, c cluster.Cluster, rangeTombstones bool,
-) {
+func runImportCancellation(ctx context.Context, t test.Test, c cluster.Cluster) {
 	c.Put(ctx, t.Cockroach(), "./cockroach")
 	c.Put(ctx, t.DeprecatedWorkload(), "./workload") // required for tpch
 	c.Start(ctx, t.L(), option.DefaultStartOpts(), install.MakeClusterSettings())
@@ -81,15 +77,6 @@ func runImportCancellation(
 	if _, err := conn.Exec(`SET CLUSTER SETTING kv.bulk_ingest.max_index_buffer_size = '2gb'`); err != nil {
 		t.Fatal(err)
 	}
-	// Enable MVCC Range tombstones, if required.
-	rtEnable := "f"
-	if rangeTombstones {
-		rtEnable = "t"
-	}
-	stmt := fmt.Sprintf(`SET CLUSTER SETTING storage.mvcc.range_tombstones.enabled = '%s'`, rtEnable)
-	if _, err := conn.Exec(stmt); err != nil {
-		t.Fatal(err)
-	}

 	seed := int64(1666467482296309000)
 	rng := randutil.NewTestRandWithSeed(seed)

pkg/cmd/roachtest/tests/npgsql_blocklist.go

Lines changed: 6 additions & 0 deletions
@@ -745,8 +745,14 @@ var npgsqlBlocklist = blocklist{
 }

 var npgsqlIgnoreList = blocklist{
+	`Npgsql.Tests.CommandTests(Multiplexing).QueryNonQuery`:  "flaky",
+	`Npgsql.Tests.CommandTests(Multiplexing).SingleNonQuery`: "flaky",
 	`Npgsql.Tests.CommandTests(Multiplexing).Statement_mapped_output_parameters(Default)`: "flaky",
+	`Npgsql.Tests.CommandTests(NonMultiplexing).Cached_command_clears_parameters_placeholder_type`: "flaky",
+	`Npgsql.Tests.CommandTests(NonMultiplexing).CloseConnection_with_exception`: "flaky",
+	`Npgsql.Tests.CommandTests(NonMultiplexing).Cursor_move_RecordsAffected `:   "flaky",
 	`Npgsql.Tests.CommandTests(NonMultiplexing).Statement_mapped_output_parameters(SequentialAccess)`: "flaky",
+	`Npgsql.Tests.CommandTests(NonMultiplexing).Use_across_connection_change(Prepared)`: "flaky",
 	`Npgsql.Tests.ConnectionTests(NonMultiplexing).PostgreSqlVersion_ServerVersion`:     "flaky",
 	`Npgsql.Tests.ConnectionTests(NonMultiplexing).Connector_not_initialized_exception`: "flaky",
 	`Npgsql.Tests.ConnectionTests(NonMultiplexing).Many_open_close_with_transaction`:    "flaky",

pkg/kv/bulk/bulk_metrics.go

Lines changed: 5 additions & 5 deletions
@@ -52,11 +52,11 @@ const log10int64times1000 = 19 * 1000
 func MakeBulkMetrics(histogramWindow time.Duration) Metrics {
 	return Metrics{
 		MaxBytesHist: metric.NewHistogram(metric.HistogramOptions{
-			Metadata: metaMemMaxBytes,
-			Duration: histogramWindow,
-			MaxVal:   log10int64times1000,
-			SigFigs:  3,
-			Buckets:  metric.MemoryUsage64MBBuckets,
+			Metadata:     metaMemMaxBytes,
+			Duration:     histogramWindow,
+			MaxVal:       log10int64times1000,
+			SigFigs:      3,
+			BucketConfig: metric.MemoryUsage64MBBuckets,
 		}),
 		CurBytesCount: metric.NewGauge(metaMemCurBytes),
 	}

pkg/kv/kvclient/kvcoord/txn_metrics.go

Lines changed: 9 additions & 9 deletions
@@ -285,20 +285,20 @@ func MakeTxnMetrics(histogramWindow time.Duration) TxnMetrics {
 		ClientRefreshAutoRetries: metric.NewCounter(metaClientRefreshAutoRetries),
 		ServerRefreshSuccess:     metric.NewCounter(metaServerRefreshSuccess),
 		Durations: metric.NewHistogram(metric.HistogramOptions{
-			Mode:     metric.HistogramModePreferHdrLatency,
-			Metadata: metaDurationsHistograms,
-			Duration: histogramWindow,
-			Buckets:  metric.IOLatencyBuckets,
+			Mode:         metric.HistogramModePreferHdrLatency,
+			Metadata:     metaDurationsHistograms,
+			Duration:     histogramWindow,
+			BucketConfig: metric.IOLatencyBuckets,
 		}),
 		TxnsWithCondensedIntents:      metric.NewCounter(metaTxnsWithCondensedIntentSpans),
 		TxnsWithCondensedIntentsGauge: metric.NewGauge(metaTxnsWithCondensedIntentSpansGauge),
 		TxnsRejectedByLockSpanBudget:  metric.NewCounter(metaTxnsRejectedByLockSpanBudget),
 		Restarts: metric.NewHistogram(metric.HistogramOptions{
-			Metadata: metaRestartsHistogram,
-			Duration: histogramWindow,
-			MaxVal:   100,
-			SigFigs:  3,
-			Buckets:  metric.Count1KBuckets,
+			Metadata:     metaRestartsHistogram,
+			Duration:     histogramWindow,
+			MaxVal:       100,
+			SigFigs:      3,
+			BucketConfig: metric.Count1KBuckets,
 		}),
 		RestartsWriteTooOld:      telemetry.NewCounterWithMetric(metaRestartsWriteTooOld),
 		RestartsWriteTooOldMulti: telemetry.NewCounterWithMetric(metaRestartsWriteTooOldMulti),
