From 869d3f8e36453cf785485916c7f632cc151d6b0f Mon Sep 17 00:00:00 2001
From: Israel Blancas
Date: Wed, 15 Oct 2025 18:11:49 +0200
Subject: [PATCH] Add retry dropped item metrics and an exhausted retry error marker for exporter helper retries

Signed-off-by: Israel Blancas
---
 .chloggen/13956.yaml                          | 26 ++++++++++
 exporter/exporterhelper/documentation.md      | 24 ++++++++++
 .../exporterhelper/internal/experr/err.go     | 25 ++++++++--
 .../internal/experr/err_test.go               | 12 +++++
 .../internal/metadata/generated_telemetry.go  | 21 ++++++++
 .../metadatatest/generated_telemetrytest.go   | 48 +++++++++++++++++++
 .../generated_telemetrytest_test.go           | 12 +++++
 .../internal/obs_report_sender.go             | 23 ++++++---
 .../internal/obs_report_sender_test.go        | 38 +++++++++++++++
 .../exporterhelper/internal/retry_sender.go   | 16 +++++--
 .../internal/retry_sender_test.go             | 21 ++++++++
 exporter/exporterhelper/metadata.yaml         | 30 ++++++++++++
 12 files changed, 282 insertions(+), 14 deletions(-)
 create mode 100644 .chloggen/13956.yaml

diff --git a/.chloggen/13956.yaml b/.chloggen/13956.yaml
new file mode 100644
index 00000000000..daef84c377d
--- /dev/null
+++ b/.chloggen/13956.yaml
@@ -0,0 +1,26 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver)
+component: pkg/exporterhelper
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Add retry dropped item metrics and an exhausted retry error marker for exporter helper retries.
+
+# One or more tracking issues or pull requests related to the change
+issues: [13956]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext: |-
+  New counters `otelcol_exporter_retry_dropped_{spans,metric_points,log_records}` count items discarded after configured retries are exhausted, while `IsRetriesExhaustedErr` identifies the terminal retry outcome.
+
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: [user]
diff --git a/exporter/exporterhelper/documentation.md b/exporter/exporterhelper/documentation.md
index 9b00977f9dc..5ab1b552cd0 100644
--- a/exporter/exporterhelper/documentation.md
+++ b/exporter/exporterhelper/documentation.md
@@ -62,6 +62,30 @@ Current size of the retry queue (in batches). [alpha]
 | ---- | ----------- | ---------- | --------- |
 | {batches} | Gauge | Int | alpha |
 
+### otelcol_exporter_retry_dropped_log_records
+
+Number of log records dropped after exhausting configured retries. [alpha]
+
+| Unit | Metric Type | Value Type | Monotonic | Stability |
+| ---- | ----------- | ---------- | --------- | --------- |
+| {records} | Sum | Int | true | alpha |
+
+### otelcol_exporter_retry_dropped_metric_points
+
+Number of metric points dropped after exhausting configured retries. [alpha]
+
+| Unit | Metric Type | Value Type | Monotonic | Stability |
+| ---- | ----------- | ---------- | --------- | --------- |
+| {datapoints} | Sum | Int | true | alpha |
+
+### otelcol_exporter_retry_dropped_spans
+
+Number of spans dropped after exhausting configured retries. [alpha]
+
+| Unit | Metric Type | Value Type | Monotonic | Stability |
+| ---- | ----------- | ---------- | --------- | --------- |
+| {spans} | Sum | Int | true | alpha |
+
 ### otelcol_exporter_send_failed_log_records
 
 Number of log records in failed attempts to send to destination. [alpha]
diff --git a/exporter/exporterhelper/internal/experr/err.go b/exporter/exporterhelper/internal/experr/err.go
index 4080e0369d6..b2dbf382048 100644
--- a/exporter/exporterhelper/internal/experr/err.go
+++ b/exporter/exporterhelper/internal/experr/err.go
@@ -3,9 +3,7 @@
 
 package experr // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal/experr"
 
-import (
-	"errors"
-)
+import "errors"
 
 type shutdownErr struct {
 	err error
@@ -27,3 +25,24 @@ func IsShutdownErr(err error) bool {
 	var sdErr shutdownErr
 	return errors.As(err, &sdErr)
 }
+
+type retriesExhaustedErr struct {
+	err error
+}
+
+func NewRetriesExhaustedErr(err error) error {
+	return retriesExhaustedErr{err: err}
+}
+
+func (r retriesExhaustedErr) Error() string {
+	return "retries exhausted: " + r.err.Error()
+}
+
+func (r retriesExhaustedErr) Unwrap() error {
+	return r.err
+}
+
+func IsRetriesExhaustedErr(err error) bool {
+	var reErr retriesExhaustedErr
+	return errors.As(err, &reErr)
+}
diff --git a/exporter/exporterhelper/internal/experr/err_test.go b/exporter/exporterhelper/internal/experr/err_test.go
index ac0580025e5..c7e64cd56b9 100644
--- a/exporter/exporterhelper/internal/experr/err_test.go
+++ b/exporter/exporterhelper/internal/experr/err_test.go
@@ -22,3 +22,15 @@ func TestIsShutdownErr(t *testing.T) {
 	err = NewShutdownErr(err)
 	require.True(t, IsShutdownErr(err))
 }
+
+func TestNewRetriesExhaustedErr(t *testing.T) {
+	err := NewRetriesExhaustedErr(errors.New("another error"))
+	assert.Equal(t, "retries exhausted: another error", err.Error())
+}
+
+func TestIsRetriesExhaustedErr(t *testing.T) {
+	err := errors.New("testError")
+	require.False(t, IsRetriesExhaustedErr(err))
+	err = NewRetriesExhaustedErr(err)
+	require.True(t, IsRetriesExhaustedErr(err))
+}
diff --git a/exporter/exporterhelper/internal/metadata/generated_telemetry.go b/exporter/exporterhelper/internal/metadata/generated_telemetry.go
index 66114dd2d3c..60e8d9cb6e6 100644
--- a/exporter/exporterhelper/internal/metadata/generated_telemetry.go
+++ b/exporter/exporterhelper/internal/metadata/generated_telemetry.go
@@ -35,6 +35,9 @@ type TelemetryBuilder struct {
 	ExporterQueueBatchSendSizeBytes  metric.Int64Histogram
 	ExporterQueueCapacity            metric.Int64ObservableGauge
 	ExporterQueueSize                metric.Int64ObservableGauge
+	ExporterRetryDroppedLogRecords   metric.Int64Counter
+	ExporterRetryDroppedMetricPoints metric.Int64Counter
+	ExporterRetryDroppedSpans        metric.Int64Counter
 	ExporterSendFailedLogRecords     metric.Int64Counter
 	ExporterSendFailedMetricPoints   metric.Int64Counter
 	ExporterSendFailedSpans          metric.Int64Counter
@@ -156,6 +159,24 @@ func NewTelemetryBuilder(settings component.TelemetrySettings, options ...Teleme
 		metric.WithUnit("{batches}"),
 	)
 	errs = errors.Join(errs, err)
+	builder.ExporterRetryDroppedLogRecords, err = builder.meter.Int64Counter(
+		"otelcol_exporter_retry_dropped_log_records",
+		metric.WithDescription("Number of log records dropped after exhausting configured retries. [alpha]"),
+		metric.WithUnit("{records}"),
+	)
+	errs = errors.Join(errs, err)
+	builder.ExporterRetryDroppedMetricPoints, err = builder.meter.Int64Counter(
+		"otelcol_exporter_retry_dropped_metric_points",
+		metric.WithDescription("Number of metric points dropped after exhausting configured retries. [alpha]"),
+		metric.WithUnit("{datapoints}"),
+	)
+	errs = errors.Join(errs, err)
+	builder.ExporterRetryDroppedSpans, err = builder.meter.Int64Counter(
+		"otelcol_exporter_retry_dropped_spans",
+		metric.WithDescription("Number of spans dropped after exhausting configured retries. [alpha]"),
+		metric.WithUnit("{spans}"),
+	)
+	errs = errors.Join(errs, err)
 	builder.ExporterSendFailedLogRecords, err = builder.meter.Int64Counter(
 		"otelcol_exporter_send_failed_log_records",
 		metric.WithDescription("Number of log records in failed attempts to send to destination. [alpha]"),
diff --git a/exporter/exporterhelper/internal/metadatatest/generated_telemetrytest.go b/exporter/exporterhelper/internal/metadatatest/generated_telemetrytest.go
index 71d1763ce5d..63c84581f8d 100644
--- a/exporter/exporterhelper/internal/metadatatest/generated_telemetrytest.go
+++ b/exporter/exporterhelper/internal/metadatatest/generated_telemetrytest.go
@@ -118,6 +118,54 @@ func AssertEqualExporterQueueSize(t *testing.T, tt *componenttest.Telemetry, dps
 	metricdatatest.AssertEqual(t, want, got, opts...)
 }
 
+func AssertEqualExporterRetryDroppedLogRecords(t *testing.T, tt *componenttest.Telemetry, dps []metricdata.DataPoint[int64], opts ...metricdatatest.Option) {
+	want := metricdata.Metrics{
+		Name:        "otelcol_exporter_retry_dropped_log_records",
+		Description: "Number of log records dropped after exhausting configured retries. [alpha]",
+		Unit:        "{records}",
+		Data: metricdata.Sum[int64]{
+			Temporality: metricdata.CumulativeTemporality,
+			IsMonotonic: true,
+			DataPoints:  dps,
+		},
+	}
+	got, err := tt.GetMetric("otelcol_exporter_retry_dropped_log_records")
+	require.NoError(t, err)
+	metricdatatest.AssertEqual(t, want, got, opts...)
+}
+
+func AssertEqualExporterRetryDroppedMetricPoints(t *testing.T, tt *componenttest.Telemetry, dps []metricdata.DataPoint[int64], opts ...metricdatatest.Option) {
+	want := metricdata.Metrics{
+		Name:        "otelcol_exporter_retry_dropped_metric_points",
+		Description: "Number of metric points dropped after exhausting configured retries. [alpha]",
+		Unit:        "{datapoints}",
+		Data: metricdata.Sum[int64]{
+			Temporality: metricdata.CumulativeTemporality,
+			IsMonotonic: true,
+			DataPoints:  dps,
+		},
+	}
+	got, err := tt.GetMetric("otelcol_exporter_retry_dropped_metric_points")
+	require.NoError(t, err)
+	metricdatatest.AssertEqual(t, want, got, opts...)
+}
+
+func AssertEqualExporterRetryDroppedSpans(t *testing.T, tt *componenttest.Telemetry, dps []metricdata.DataPoint[int64], opts ...metricdatatest.Option) {
+	want := metricdata.Metrics{
+		Name:        "otelcol_exporter_retry_dropped_spans",
+		Description: "Number of spans dropped after exhausting configured retries. [alpha]",
+		Unit:        "{spans}",
+		Data: metricdata.Sum[int64]{
+			Temporality: metricdata.CumulativeTemporality,
+			IsMonotonic: true,
+			DataPoints:  dps,
+		},
+	}
+	got, err := tt.GetMetric("otelcol_exporter_retry_dropped_spans")
+	require.NoError(t, err)
+	metricdatatest.AssertEqual(t, want, got, opts...)
+}
+
 func AssertEqualExporterSendFailedLogRecords(t *testing.T, tt *componenttest.Telemetry, dps []metricdata.DataPoint[int64], opts ...metricdatatest.Option) {
 	want := metricdata.Metrics{
 		Name:        "otelcol_exporter_send_failed_log_records",
diff --git a/exporter/exporterhelper/internal/metadatatest/generated_telemetrytest_test.go b/exporter/exporterhelper/internal/metadatatest/generated_telemetrytest_test.go
index 838a03f504a..578cf883e44 100644
--- a/exporter/exporterhelper/internal/metadatatest/generated_telemetrytest_test.go
+++ b/exporter/exporterhelper/internal/metadatatest/generated_telemetrytest_test.go
@@ -33,6 +33,9 @@ func TestSetupTelemetry(t *testing.T) {
 	tb.ExporterEnqueueFailedSpans.Add(context.Background(), 1)
 	tb.ExporterQueueBatchSendSize.Record(context.Background(), 1)
 	tb.ExporterQueueBatchSendSizeBytes.Record(context.Background(), 1)
+	tb.ExporterRetryDroppedLogRecords.Add(context.Background(), 1)
+	tb.ExporterRetryDroppedMetricPoints.Add(context.Background(), 1)
+	tb.ExporterRetryDroppedSpans.Add(context.Background(), 1)
 	tb.ExporterSendFailedLogRecords.Add(context.Background(), 1)
 	tb.ExporterSendFailedMetricPoints.Add(context.Background(), 1)
 	tb.ExporterSendFailedSpans.Add(context.Background(), 1)
@@ -60,6 +63,15 @@ func TestSetupTelemetry(t *testing.T) {
 	AssertEqualExporterQueueSize(t, testTel,
 		[]metricdata.DataPoint[int64]{{Value: 1}},
 		metricdatatest.IgnoreTimestamp())
+	AssertEqualExporterRetryDroppedLogRecords(t, testTel,
+		[]metricdata.DataPoint[int64]{{Value: 1}},
+		metricdatatest.IgnoreTimestamp())
+	AssertEqualExporterRetryDroppedMetricPoints(t, testTel,
+		[]metricdata.DataPoint[int64]{{Value: 1}},
+		metricdatatest.IgnoreTimestamp())
+	AssertEqualExporterRetryDroppedSpans(t, testTel,
+		[]metricdata.DataPoint[int64]{{Value: 1}},
+		metricdatatest.IgnoreTimestamp())
 	AssertEqualExporterSendFailedLogRecords(t, testTel,
 		[]metricdata.DataPoint[int64]{{Value: 1}},
 		metricdatatest.IgnoreTimestamp())
diff --git a/exporter/exporterhelper/internal/obs_report_sender.go b/exporter/exporterhelper/internal/obs_report_sender.go
index 3438863ff51..8f1cdac69c2 100644
--- a/exporter/exporterhelper/internal/obs_report_sender.go
+++ b/exporter/exporterhelper/internal/obs_report_sender.go
@@ -13,6 +13,7 @@ import (
 
 	"go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/exporter"
+	"go.opentelemetry.io/collector/exporter/exporterhelper/internal/experr"
 	"go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata"
 	"go.opentelemetry.io/collector/exporter/exporterhelper/internal/queuebatch"
 	"go.opentelemetry.io/collector/exporter/exporterhelper/internal/request"
@@ -40,13 +41,14 @@ type obsReportSender[K request.Request] struct {
 	component.StartFunc
 	component.ShutdownFunc
 
-	spanName        string
-	tracer          trace.Tracer
-	spanAttrs       trace.SpanStartEventOption
-	metricAttr      metric.MeasurementOption
-	itemsSentInst   metric.Int64Counter
-	itemsFailedInst metric.Int64Counter
-	next            sender.Sender[K]
+	spanName              string
+	tracer                trace.Tracer
+	spanAttrs             trace.SpanStartEventOption
+	metricAttr            metric.MeasurementOption
+	itemsSentInst         metric.Int64Counter
+	itemsFailedInst       metric.Int64Counter
+	itemsRetryDroppedInst metric.Int64Counter
+	next                  sender.Sender[K]
 }
 
 func newObsReportSender[K request.Request](set exporter.Settings, signal pipeline.Signal, next sender.Sender[K]) (sender.Sender[K], error) {
@@ -70,14 +72,17 @@ func newObsReportSender[K request.Request](set exporter.Settings, signal pipelin
 	case pipeline.SignalTraces:
 		or.itemsSentInst = telemetryBuilder.ExporterSentSpans
 		or.itemsFailedInst = telemetryBuilder.ExporterSendFailedSpans
+		or.itemsRetryDroppedInst = telemetryBuilder.ExporterRetryDroppedSpans
 
 	case pipeline.SignalMetrics:
 		or.itemsSentInst = telemetryBuilder.ExporterSentMetricPoints
 		or.itemsFailedInst = telemetryBuilder.ExporterSendFailedMetricPoints
+		or.itemsRetryDroppedInst = telemetryBuilder.ExporterRetryDroppedMetricPoints
 
 	case pipeline.SignalLogs:
 		or.itemsSentInst = telemetryBuilder.ExporterSentLogRecords
 		or.itemsFailedInst = telemetryBuilder.ExporterSendFailedLogRecords
+		or.itemsRetryDroppedInst = telemetryBuilder.ExporterRetryDroppedLogRecords
 	}
 	return or, nil
 }
@@ -116,6 +121,10 @@ func (ors *obsReportSender[K]) endOp(ctx context.Context, numLogRecords int, err
 	if ors.itemsFailedInst != nil {
 		ors.itemsFailedInst.Add(ctx, numFailedToSend, ors.metricAttr)
 	}
+	// Count drops after retries were exhausted.
+	if err != nil && ors.itemsRetryDroppedInst != nil && experr.IsRetriesExhaustedErr(err) {
+		ors.itemsRetryDroppedInst.Add(ctx, numFailedToSend, ors.metricAttr)
+	}
 
 	span := trace.SpanFromContext(ctx)
 	defer span.End()
diff --git a/exporter/exporterhelper/internal/obs_report_sender_test.go b/exporter/exporterhelper/internal/obs_report_sender_test.go
index 0f54aef7853..d7b1484b0b4 100644
--- a/exporter/exporterhelper/internal/obs_report_sender_test.go
+++ b/exporter/exporterhelper/internal/obs_report_sender_test.go
@@ -18,6 +18,7 @@ import (
 	"go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/component/componenttest"
 	"go.opentelemetry.io/collector/exporter"
+	"go.opentelemetry.io/collector/exporter/exporterhelper/internal/experr"
 	"go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadatatest"
 	"go.opentelemetry.io/collector/exporter/exporterhelper/internal/request"
 	"go.opentelemetry.io/collector/exporter/exporterhelper/internal/requesttest"
@@ -31,6 +32,43 @@ var (
 	errFake = errors.New("errFake")
 )
 
+func TestExportTraceRetryDroppedMetric(t *testing.T) {
+	tt := componenttest.NewTelemetry()
+	t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) })
+
+	obsrep, err := newObsReportSender(
+		exporter.Settings{ID: exporterID, TelemetrySettings: tt.NewTelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()},
+		pipeline.SignalTraces,
+		sender.NewSender(func(context.Context, request.Request) error {
+			return experr.NewRetriesExhaustedErr(errFake)
+		}),
+	)
+	require.NoError(t, err)
+
+	req := &requesttest.FakeRequest{Items: 7}
+	sendErr := obsrep.Send(context.Background(), req)
+	require.Error(t, sendErr)
+	require.True(t, experr.IsRetriesExhaustedErr(sendErr))
+
+	wantAttrs := attribute.NewSet(attribute.String("exporter", exporterID.String()))
+
+	metadatatest.AssertEqualExporterSendFailedSpans(t, tt,
+		[]metricdata.DataPoint[int64]{
+			{
+				Attributes: wantAttrs,
+				Value:      int64(req.Items),
+			},
+		}, metricdatatest.IgnoreTimestamp(), metricdatatest.IgnoreExemplars())
+
+	metadatatest.AssertEqualExporterRetryDroppedSpans(t, tt,
+		[]metricdata.DataPoint[int64]{
+			{
+				Attributes: wantAttrs,
+				Value:      int64(req.Items),
+			},
+		}, metricdatatest.IgnoreTimestamp(), metricdatatest.IgnoreExemplars())
+}
+
 func TestExportTraceDataOp(t *testing.T) {
 	tt := componenttest.NewTelemetry()
 	t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) })
diff --git a/exporter/exporterhelper/internal/retry_sender.go b/exporter/exporterhelper/internal/retry_sender.go
index 815d604e89a..22fd8e1a6c3 100644
--- a/exporter/exporterhelper/internal/retry_sender.go
+++ b/exporter/exporterhelper/internal/retry_sender.go
@@ -79,10 +79,17 @@ func (rs *retrySender) Send(ctx context.Context, req request.Request) error {
 	}
 	span := trace.SpanFromContext(ctx)
 	retryNum := int64(0)
+	retried := false
 	var maxElapsedTime time.Time
 	if rs.cfg.MaxElapsedTime > 0 {
 		maxElapsedTime = time.Now().Add(rs.cfg.MaxElapsedTime)
 	}
+	wrapRetryErr := func(e error) error {
+		if retried {
+			return experr.NewRetriesExhaustedErr(e)
+		}
+		return e
+	}
 	for {
 		span.AddEvent(
 			"Sending request.",
@@ -104,7 +111,7 @@ func (rs *retrySender) Send(ctx context.Context, req request.Request) error {
 
 		backoffDelay := expBackoff.NextBackOff()
 		if backoffDelay == backoff.Stop {
-			return fmt.Errorf("no more retries left: %w", err)
+			return wrapRetryErr(fmt.Errorf("no more retries left: %w", err))
 		}
 
 		throttleErr := throttleRetry{}
@@ -115,13 +122,13 @@ func (rs *retrySender) Send(ctx context.Context, req request.Request) error {
 		nextRetryTime := time.Now().Add(backoffDelay)
 		if !maxElapsedTime.IsZero() && maxElapsedTime.Before(nextRetryTime) {
 			// The delay is longer than the maxElapsedTime.
-			return fmt.Errorf("no more retries left: %w", err)
+			return wrapRetryErr(fmt.Errorf("no more retries left: %w", err))
 		}
 
 		if deadline, has := ctx.Deadline(); has && deadline.Before(nextRetryTime) {
 			// The delay is longer than the deadline. There is no point in
 			// waiting for cancelation.
-			return fmt.Errorf("request will be cancelled before next retry: %w", err)
+			return wrapRetryErr(fmt.Errorf("request will be cancelled before next retry: %w", err))
 		}
 
 		backoffDelayStr := backoffDelay.String()
@@ -140,10 +147,11 @@ func (rs *retrySender) Send(ctx context.Context, req request.Request) error {
 		// back-off, but get interrupted when shutting down or request is cancelled or timed out.
 		select {
 		case <-ctx.Done():
-			return fmt.Errorf("request is cancelled or timed out: %w", err)
+			return wrapRetryErr(fmt.Errorf("request is cancelled or timed out: %w", err))
 		case <-rs.stopCh:
 			return experr.NewShutdownErr(err)
 		case <-time.After(backoffDelay):
+			retried = true
 		}
 	}
 }
diff --git a/exporter/exporterhelper/internal/retry_sender_test.go b/exporter/exporterhelper/internal/retry_sender_test.go
index 387e41e0aae..554cc14e4ba 100644
--- a/exporter/exporterhelper/internal/retry_sender_test.go
+++ b/exporter/exporterhelper/internal/retry_sender_test.go
@@ -18,6 +18,7 @@ import (
 	"go.opentelemetry.io/collector/component/componenttest"
 	"go.opentelemetry.io/collector/config/configretry"
 	"go.opentelemetry.io/collector/consumer/consumererror"
+	"go.opentelemetry.io/collector/exporter/exporterhelper/internal/experr"
 	"go.opentelemetry.io/collector/exporter/exporterhelper/internal/request"
 	"go.opentelemetry.io/collector/exporter/exporterhelper/internal/requesttest"
 	"go.opentelemetry.io/collector/exporter/exporterhelper/internal/sender"
@@ -76,6 +77,26 @@ func TestRetrySenderMaxElapsedTime(t *testing.T) {
 	require.NoError(t, rs.Shutdown(context.Background()))
 }
 
+func TestRetrySenderRetriesExhaustedErrorWrapped(t *testing.T) {
+	rCfg := configretry.NewDefaultBackOffConfig()
+	rCfg.InitialInterval = time.Millisecond
+	rCfg.RandomizationFactor = 0
+	rCfg.Multiplier = 1
+	rCfg.MaxInterval = time.Millisecond
+	rCfg.MaxElapsedTime = 4 * time.Millisecond
+	var attempts int
+	rs := newRetrySender(rCfg, exportertest.NewNopSettings(exportertest.NopType), sender.NewSender(func(context.Context, request.Request) error {
+		attempts++
+		return errors.New("transient error")
+	}))
+	require.NoError(t, rs.Start(context.Background(), componenttest.NewNopHost()))
+	err := rs.Send(context.Background(), &requesttest.FakeRequest{Items: 2})
+	require.Error(t, err)
+	require.True(t, experr.IsRetriesExhaustedErr(err))
+	require.GreaterOrEqual(t, attempts, 2)
+	require.NoError(t, rs.Shutdown(context.Background()))
+}
+
 func TestRetrySenderThrottleError(t *testing.T) {
 	rCfg := configretry.NewDefaultBackOffConfig()
 	rCfg.InitialInterval = 10 * time.Millisecond
diff --git a/exporter/exporterhelper/metadata.yaml b/exporter/exporterhelper/metadata.yaml
index 0af14cce32e..a6197d223af 100644
--- a/exporter/exporterhelper/metadata.yaml
+++ b/exporter/exporterhelper/metadata.yaml
@@ -33,6 +33,16 @@ telemetry:
       value_type: int
       monotonic: true
 
+  exporter_retry_dropped_spans:
+    enabled: true
+    stability:
+      level: alpha
+    description: Number of spans dropped after exhausting configured retries.
+    unit: "{spans}"
+    sum:
+      value_type: int
+      monotonic: true
+
   exporter_enqueue_failed_spans:
     enabled: true
     stability:
@@ -63,6 +73,16 @@ telemetry:
       value_type: int
       monotonic: true
 
+  exporter_retry_dropped_metric_points:
+    enabled: true
+    stability:
+      level: alpha
+    description: Number of metric points dropped after exhausting configured retries.
+    unit: "{datapoints}"
+    sum:
+      value_type: int
+      monotonic: true
+
   exporter_enqueue_failed_metric_points:
     enabled: true
     stability:
@@ -93,6 +113,16 @@ telemetry:
       value_type: int
      monotonic: true
 
+  exporter_retry_dropped_log_records:
+    enabled: true
+    stability:
+      level: alpha
+    description: Number of log records dropped after exhausting configured retries.
+    unit: "{records}"
+    sum:
+      value_type: int
+      monotonic: true
+
   exporter_enqueue_failed_log_records:
     enabled: true
     stability:
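
Not part of the diff: a minimal sketch of how the new exhausted-retry marker composes with standard error wrapping. It mirrors what retry_sender.go now does (the terminal attempt's error is wrapped with fmt.Errorf and then marked), so IsRetriesExhaustedErr reports true while errors.Is still reaches the original cause; obs_report_sender.go only increments the otelcol_exporter_retry_dropped_* counters when that marker is present. Because the experr package is internal, the sketch is written as if it lived in that package (for example, as an extra test); the test name and the errBackend sentinel are illustrative only.

package experr

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

// errBackend stands in for a real downstream failure.
var errBackend = errors.New("backend unavailable")

func TestRetriesExhaustedErrComposesWithWrapping(t *testing.T) {
	// Mirror the retry sender's terminal path: wrap the last attempt's error, then mark it.
	err := NewRetriesExhaustedErr(fmt.Errorf("no more retries left: %w", errBackend))

	// The marker is detectable through the wrapping, which is what gates the drop counters.
	require.True(t, IsRetriesExhaustedErr(err))
	// The original cause remains reachable for callers that unwrap the chain.
	require.True(t, errors.Is(err, errBackend))
}

Since a drop is only counted when the send error carries this marker, comparing otelcol_exporter_send_failed_* with otelcol_exporter_retry_dropped_* distinguishes transient send failures from terminal ones.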