diff --git a/CHANGELOG.md b/CHANGELOG.md index 605d7dba8f6..6400a21443e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - Add unmarshaling and validation for `OTLPHttpExporter`, `OTLPGrpcExporter`, `OTLPGrpcMetricExporter` and `OTLPHttpMetricExporter` to v1.0.0 model in `go.opentelemetry.io/contrib/otelconf`. (#8112) - Add a `WithSpanNameFormatter` option to `go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/v2/mongo/otelmongo`. (#7986) - Add unmarshaling and validation for `AttributeType`, `AttributeNameValue`, `SimpleSpanProcessor`, `SimpleLogRecordProcessor`, `ZipkinSpanExporter`, `NameStringValuePair`, `InstrumentType`, `ExperimentalPeerInstrumentationServiceMappingElem`, `ExporterDefaultHistogramAggregation`, `PullMetricReader` to v1.0.0 model in `go.opentelemetry.io/contrib/otelconf`. (#8127) +- Updated `go.opentelemetry.io/contrib/otelconf` to include the [v1.0.0-rc2](https://github.com/open-telemetry/opentelemetry-configuration/releases/tag/v1.0.0-rc.2) release candidate of schema which includes backwards incompatible changes. 
(#8026) ### Changed diff --git a/Makefile b/Makefile index 12a7f5c9e5e..d3a94996fd4 100644 --- a/Makefile +++ b/Makefile @@ -329,6 +329,7 @@ OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_VERSION=v1.0.0-rc.2 genjsonschema-cleanup: rm -Rf ${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_SRC_DIR} +GENERATED_CONFIG_EXPERIMENTAL=./otelconf/x/generated_config.go GENERATED_CONFIG=./otelconf/generated_config.go # Generate structs for configuration from opentelemetry-configuration schema @@ -339,13 +340,13 @@ genjsonschema: genjsonschema-cleanup $(GOJSONSCHEMA) --capitalization ID \ --capitalization OTLP \ --struct-name-from-title \ - --package otelconf \ + --package x \ --only-models \ - --output ${GENERATED_CONFIG} \ + --output ${GENERATED_CONFIG_EXPERIMENTAL} \ ${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_SRC_DIR}/schema/opentelemetry_configuration.json @echo Modify jsonschema generated files. - sed -f ./otelconf/jsonschema_patch.sed ${GENERATED_CONFIG} > ${GENERATED_CONFIG}.tmp - mv ${GENERATED_CONFIG}.tmp ${GENERATED_CONFIG} + sed -f ./otelconf/jsonschema_patch.sed ${GENERATED_CONFIG_EXPERIMENTAL} > ${GENERATED_CONFIG_EXPERIMENTAL}.tmp + mv ${GENERATED_CONFIG_EXPERIMENTAL}.tmp ${GENERATED_CONFIG} $(MAKE) genjsonschema-cleanup .PHONY: codespell diff --git a/otelconf/config.go b/otelconf/config.go new file mode 100644 index 00000000000..db6305803b8 --- /dev/null +++ b/otelconf/config.go @@ -0,0 +1,171 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package otelconf provides an OpenTelemetry declarative configuration SDK. 
+package otelconf // import "go.opentelemetry.io/contrib/otelconf" + +import ( + "context" + "errors" + + "go.opentelemetry.io/otel/log" + nooplog "go.opentelemetry.io/otel/log/noop" + "go.opentelemetry.io/otel/metric" + noopmetric "go.opentelemetry.io/otel/metric/noop" + sdklog "go.opentelemetry.io/otel/sdk/log" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/trace" + nooptrace "go.opentelemetry.io/otel/trace/noop" + yaml "go.yaml.in/yaml/v3" + + "go.opentelemetry.io/contrib/otelconf/internal/provider" +) + +// SDK is a struct that contains all the providers +// configured via the configuration model. +type SDK struct { + meterProvider metric.MeterProvider + tracerProvider trace.TracerProvider + loggerProvider log.LoggerProvider + shutdown shutdownFunc +} + +// TracerProvider returns a configured trace.TracerProvider. +func (s *SDK) TracerProvider() trace.TracerProvider { + return s.tracerProvider +} + +// MeterProvider returns a configured metric.MeterProvider. +func (s *SDK) MeterProvider() metric.MeterProvider { + return s.meterProvider +} + +// LoggerProvider returns a configured log.LoggerProvider. +func (s *SDK) LoggerProvider() log.LoggerProvider { + return s.loggerProvider +} + +// Shutdown calls shutdown on all configured providers. +func (s *SDK) Shutdown(ctx context.Context) error { + return s.shutdown(ctx) +} + +var noopSDK = SDK{ + loggerProvider: nooplog.LoggerProvider{}, + meterProvider: noopmetric.MeterProvider{}, + tracerProvider: nooptrace.TracerProvider{}, + shutdown: func(context.Context) error { return nil }, +} + +// NewSDK creates SDK providers based on the configuration model. 
+func NewSDK(opts ...ConfigurationOption) (SDK, error) { + o := configOptions{ + ctx: context.Background(), + } + for _, opt := range opts { + o = opt.apply(o) + } + if o.opentelemetryConfig.Disabled != nil && *o.opentelemetryConfig.Disabled { + return noopSDK, nil + } + + r, err := newResource(o.opentelemetryConfig.Resource) + if err != nil { + return noopSDK, err + } + + mp, mpShutdown, err := meterProvider(o, r) + if err != nil { + return noopSDK, err + } + + tp, tpShutdown, err := tracerProvider(o, r) + if err != nil { + return noopSDK, err + } + + lp, lpShutdown, err := loggerProvider(o, r) + if err != nil { + return noopSDK, err + } + + return SDK{ + meterProvider: mp, + tracerProvider: tp, + loggerProvider: lp, + shutdown: func(ctx context.Context) error { + return errors.Join(mpShutdown(ctx), tpShutdown(ctx), lpShutdown(ctx)) + }, + }, nil +} + +// ConfigurationOption configures options for providers. +type ConfigurationOption interface { + apply(configOptions) configOptions +} + +type configurationOptionFunc func(configOptions) configOptions + +func (fn configurationOptionFunc) apply(cfg configOptions) configOptions { + return fn(cfg) +} + +// WithContext sets the context.Context for the SDK. +func WithContext(ctx context.Context) ConfigurationOption { + return configurationOptionFunc(func(c configOptions) configOptions { + c.ctx = ctx + return c + }) +} + +// WithOpenTelemetryConfiguration sets the OpenTelemetryConfiguration used +// to produce the SDK. +func WithOpenTelemetryConfiguration(cfg OpenTelemetryConfiguration) ConfigurationOption { + return configurationOptionFunc(func(c configOptions) configOptions { + c.opentelemetryConfig = cfg + return c + }) +} + +// WithLoggerProviderOptions appends LoggerProviderOptions used for constructing +// the LoggerProvider. OpenTelemetryConfiguration takes precedence over these options. 
+func WithLoggerProviderOptions(opts ...sdklog.LoggerProviderOption) ConfigurationOption { + return configurationOptionFunc(func(c configOptions) configOptions { + c.loggerProviderOptions = append(c.loggerProviderOptions, opts...) + return c + }) +} + +// WithMeterProviderOptions appends metric.Options used for constructing the +// MeterProvider. OpenTelemetryConfiguration takes precedence over these options. +func WithMeterProviderOptions(opts ...sdkmetric.Option) ConfigurationOption { + return configurationOptionFunc(func(c configOptions) configOptions { + c.meterProviderOptions = append(c.meterProviderOptions, opts...) + return c + }) +} + +// WithTracerProviderOptions appends TracerProviderOptions used for constructing +// the TracerProvider. OpenTelemetryConfiguration takes precedence over these options. +func WithTracerProviderOptions(opts ...sdktrace.TracerProviderOption) ConfigurationOption { + return configurationOptionFunc(func(c configOptions) configOptions { + c.tracerProviderOptions = append(c.tracerProviderOptions, opts...) + return c + }) +} + +// ParseYAML parses a YAML configuration file into an OpenTelemetryConfiguration. 
+func ParseYAML(file []byte) (*OpenTelemetryConfiguration, error) { + file, err := provider.ReplaceEnvVars(file) + if err != nil { + return nil, err + } + var cfg OpenTelemetryConfiguration + err = yaml.Unmarshal(file, &cfg) + if err != nil { + return nil, err + } + + return &cfg, nil +} diff --git a/otelconf/config_common.go b/otelconf/config_common.go index 246d29e33b4..bb304e07958 100644 --- a/otelconf/config_common.go +++ b/otelconf/config_common.go @@ -50,8 +50,8 @@ var enumValuesOTLPMetricDefaultHistogramAggregation = []any{ type configOptions struct { ctx context.Context opentelemetryConfig OpenTelemetryConfiguration - meterProviderOptions []sdkmetric.Option loggerProviderOptions []sdklog.LoggerProviderOption + meterProviderOptions []sdkmetric.Option tracerProviderOptions []sdktrace.TracerProviderOption } @@ -155,6 +155,30 @@ func newErrInvalid(id string) error { return &errInvalid{Identifier: id} } +// unmarshalSamplerTypes handles always_on and always_off sampler unmarshaling. +func unmarshalSamplerTypes(raw map[string]any, plain *Sampler) { + // always_on can be nil, must check and set here + if _, ok := raw["always_on"]; ok { + plain.AlwaysOn = AlwaysOnSampler{} + } + // always_off can be nil, must check and set here + if _, ok := raw["always_off"]; ok { + plain.AlwaysOff = AlwaysOffSampler{} + } +} + +// unmarshalMetricProducer handles opencensus metric producer unmarshaling. +func unmarshalMetricProducer(raw map[string]any, plain *MetricProducer) { + // opencensus can be nil, must check and set here + if v, ok := raw["opencensus"]; ok && v == nil { + delete(raw, "opencensus") + plain.Opencensus = OpenCensusMetricProducer{} + } + if len(raw) > 0 { + plain.AdditionalProperties = raw + } +} + // validatePeriodicMetricReader handles validation for PeriodicMetricReader. 
func validatePeriodicMetricReader(plain *PeriodicMetricReader) error { if plain.Timeout != nil && 0 > *plain.Timeout { diff --git a/otelconf/config_json.go b/otelconf/config_json.go index 894b5c367d8..6e29871047c 100644 --- a/otelconf/config_json.go +++ b/otelconf/config_json.go @@ -281,8 +281,7 @@ func (j *BatchLogRecordProcessor) UnmarshalJSON(b []byte) error { if err := json.Unmarshal(sh.Exporter, &sh.Plain.Exporter); err != nil { return err } - err := validateBatchLogRecordProcessor((*BatchLogRecordProcessor)(&sh.Plain)) - if err != nil { + if err := validateBatchLogRecordProcessor((*BatchLogRecordProcessor)(&sh.Plain)); err != nil { return err } *j = BatchLogRecordProcessor(sh.Plain) @@ -307,14 +306,122 @@ func (j *BatchSpanProcessor) UnmarshalJSON(b []byte) error { if err := json.Unmarshal(sh.Exporter, &sh.Plain.Exporter); err != nil { return err } - err := validateBatchSpanProcessor((*BatchSpanProcessor)(&sh.Plain)) - if err != nil { + if err := validateBatchSpanProcessor((*BatchSpanProcessor)(&sh.Plain)); err != nil { return err } *j = BatchSpanProcessor(sh.Plain) return nil } +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *OpenTelemetryConfiguration) UnmarshalJSON(b []byte) error { + type Plain OpenTelemetryConfiguration + type shadow struct { + Plain + FileFormat json.RawMessage `json:"file_format"` + LoggerProvider json.RawMessage `json:"logger_provider"` + MeterProvider json.RawMessage `json:"meter_provider"` + TracerProvider json.RawMessage `json:"tracer_provider"` + Propagator json.RawMessage `json:"propagator"` + Resource json.RawMessage `json:"resource"` + InstrumentationDevelopment json.RawMessage `json:"instrumentation/development"` + AttributeLimits json.RawMessage `json:"attribute_limits"` + Disabled json.RawMessage `json:"disabled"` + LogLevel json.RawMessage `json:"log_level"` + } + var sh shadow + if err := json.Unmarshal(b, &sh); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + + if len(sh.FileFormat) == 0 { + return newErrRequired(j, "file_format") + } + + if err := json.Unmarshal(sh.FileFormat, &sh.Plain.FileFormat); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + + if sh.LoggerProvider != nil { + var l LoggerProviderJson + if err := json.Unmarshal(sh.LoggerProvider, &l); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + sh.Plain.LoggerProvider = &l + } + + if sh.MeterProvider != nil { + var m MeterProviderJson + if err := json.Unmarshal(sh.MeterProvider, &m); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + sh.Plain.MeterProvider = &m + } + + if sh.TracerProvider != nil { + var t TracerProviderJson + if err := json.Unmarshal(sh.TracerProvider, &t); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + sh.Plain.TracerProvider = &t + } + + if sh.Propagator != nil { + var p PropagatorJson + if err := json.Unmarshal(sh.Propagator, &p); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + sh.Plain.Propagator = &p + } + + if sh.Resource != nil { + var r ResourceJson + if err := json.Unmarshal(sh.Resource, &r); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + 
sh.Plain.Resource = &r + } + + if sh.InstrumentationDevelopment != nil { + var r InstrumentationJson + if err := json.Unmarshal(sh.InstrumentationDevelopment, &r); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + sh.Plain.InstrumentationDevelopment = &r + } + + if sh.AttributeLimits != nil { + var r AttributeLimits + if err := json.Unmarshal(sh.AttributeLimits, &r); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + sh.Plain.AttributeLimits = &r + } + + if sh.Disabled != nil { + if err := json.Unmarshal(sh.Disabled, &sh.Plain.Disabled); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + } else { + // Configure if the SDK is disabled or not. + // If omitted or null, false is used. + sh.Plain.Disabled = ptr(false) + } + + if sh.LogLevel != nil { + if err := json.Unmarshal(sh.LogLevel, &sh.Plain.LogLevel); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + } else { + // Configure the log level of the internal logger used by the SDK. + // If omitted, info is used. + sh.Plain.LogLevel = ptr("info") + } + + *j = OpenTelemetryConfiguration(sh.Plain) + return nil +} + // UnmarshalJSON implements json.Unmarshaler. func (j *PeriodicMetricReader) UnmarshalJSON(b []byte) error { type Plain PeriodicMetricReader @@ -714,3 +821,35 @@ func (j *PullMetricReader) UnmarshalJSON(b []byte) error { *j = PullMetricReader(sh.Plain) return nil } + +// UnmarshalJSON implements json.Unmarshaler. +func (j *Sampler) UnmarshalJSON(b []byte) error { + var raw map[string]any + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + type Plain Sampler + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + unmarshalSamplerTypes(raw, (*Sampler)(&plain)) + *j = Sampler(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *MetricProducer) UnmarshalJSON(b []byte) error { + var raw map[string]any + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + type Plain MetricProducer + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + unmarshalMetricProducer(raw, (*MetricProducer)(&plain)) + *j = MetricProducer(plain) + return nil +} diff --git a/otelconf/config_test.go b/otelconf/config_test.go index 99c31e76b63..d73b9376633 100644 --- a/otelconf/config_test.go +++ b/otelconf/config_test.go @@ -4,10 +4,20 @@ package otelconf import ( + "encoding/json" + "fmt" + "os" + "path/filepath" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + lognoop "go.opentelemetry.io/otel/log/noop" + metricnoop "go.opentelemetry.io/otel/metric/noop" + sdklog "go.opentelemetry.io/otel/sdk/log" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + tracenoop "go.opentelemetry.io/otel/trace/noop" "go.yaml.in/yaml/v3" ) @@ -339,6 +349,819 @@ func TestUnmarshalBatchLogRecordProcessor(t *testing.T) { } } +func TestNewSDK(t *testing.T) { + tests := []struct { + name string + cfg []ConfigurationOption + wantTracerProvider any + wantMeterProvider any + wantLoggerProvider any + wantErr error + wantShutdownErr error + }{ + { + name: "no-configuration", + wantTracerProvider: tracenoop.NewTracerProvider(), + wantMeterProvider: metricnoop.NewMeterProvider(), + wantLoggerProvider: lognoop.NewLoggerProvider(), + }, + { + name: "with-configuration", + cfg: []ConfigurationOption{ + WithContext(t.Context()), + WithOpenTelemetryConfiguration(OpenTelemetryConfiguration{ + TracerProvider: &TracerProviderJson{}, + MeterProvider: &MeterProviderJson{}, + LoggerProvider: &LoggerProviderJson{}, + }), + }, + wantTracerProvider: &sdktrace.TracerProvider{}, + wantMeterProvider: &sdkmetric.MeterProvider{}, + wantLoggerProvider: &sdklog.LoggerProvider{}, + }, + { + name: "with-sdk-disabled", + cfg: 
[]ConfigurationOption{ + WithContext(t.Context()), + WithOpenTelemetryConfiguration(OpenTelemetryConfiguration{ + Disabled: ptr(true), + TracerProvider: &TracerProviderJson{}, + MeterProvider: &MeterProviderJson{}, + LoggerProvider: &LoggerProviderJson{}, + }), + }, + wantTracerProvider: tracenoop.NewTracerProvider(), + wantMeterProvider: metricnoop.NewMeterProvider(), + wantLoggerProvider: lognoop.NewLoggerProvider(), + }, + { + name: "invalid resource", + cfg: []ConfigurationOption{ + WithContext(t.Context()), + WithOpenTelemetryConfiguration(OpenTelemetryConfiguration{ + TracerProvider: &TracerProviderJson{}, + MeterProvider: &MeterProviderJson{}, + LoggerProvider: &LoggerProviderJson{}, + Resource: &LoggerProviderJson{}, + }), + }, + wantErr: newErrInvalid("resource"), + wantTracerProvider: tracenoop.NewTracerProvider(), + wantMeterProvider: metricnoop.NewMeterProvider(), + wantLoggerProvider: lognoop.NewLoggerProvider(), + }, + { + name: "invalid logger provider", + cfg: []ConfigurationOption{ + WithContext(t.Context()), + WithOpenTelemetryConfiguration(OpenTelemetryConfiguration{ + TracerProvider: &TracerProviderJson{}, + MeterProvider: &MeterProviderJson{}, + LoggerProvider: &ResourceJson{}, + Resource: &ResourceJson{}, + }), + }, + wantErr: newErrInvalid("logger_provider"), + wantTracerProvider: tracenoop.NewTracerProvider(), + wantMeterProvider: metricnoop.NewMeterProvider(), + wantLoggerProvider: lognoop.NewLoggerProvider(), + }, + { + name: "invalid tracer provider", + cfg: []ConfigurationOption{ + WithContext(t.Context()), + WithOpenTelemetryConfiguration(OpenTelemetryConfiguration{ + TracerProvider: &ResourceJson{}, + }), + }, + wantErr: newErrInvalid("tracer_provider"), + wantTracerProvider: tracenoop.NewTracerProvider(), + wantMeterProvider: metricnoop.NewMeterProvider(), + wantLoggerProvider: lognoop.NewLoggerProvider(), + }, + } + for _, tt := range tests { + sdk, err := NewSDK(tt.cfg...) 
+ require.Equal(t, tt.wantErr, err) + assert.IsType(t, tt.wantTracerProvider, sdk.TracerProvider()) + assert.IsType(t, tt.wantMeterProvider, sdk.MeterProvider()) + assert.IsType(t, tt.wantLoggerProvider, sdk.LoggerProvider()) + require.Equal(t, tt.wantShutdownErr, sdk.Shutdown(t.Context())) + } +} + +var v10OpenTelemetryConfig = OpenTelemetryConfiguration{ + Disabled: ptr(false), + FileFormat: "1.0-rc.2", + AttributeLimits: &AttributeLimits{ + AttributeCountLimit: ptr(128), + AttributeValueLengthLimit: ptr(4096), + }, + InstrumentationDevelopment: &InstrumentationJson{ + Cpp: ExperimentalLanguageSpecificInstrumentation{ + "example": map[string]any{ + "property": "value", + }, + }, + Dotnet: ExperimentalLanguageSpecificInstrumentation{ + "example": map[string]any{ + "property": "value", + }, + }, + Erlang: ExperimentalLanguageSpecificInstrumentation{ + "example": map[string]any{ + "property": "value", + }, + }, + General: &ExperimentalGeneralInstrumentation{ + Http: &ExperimentalHttpInstrumentation{ + Client: &ExperimentalHttpInstrumentationClient{ + RequestCapturedHeaders: []string{"Content-Type", "Accept"}, + ResponseCapturedHeaders: []string{"Content-Type", "Content-Encoding"}, + }, + Server: &ExperimentalHttpInstrumentationServer{ + RequestCapturedHeaders: []string{"Content-Type", "Accept"}, + ResponseCapturedHeaders: []string{"Content-Type", "Content-Encoding"}, + }, + }, + Peer: &ExperimentalPeerInstrumentation{ + ServiceMapping: []ExperimentalPeerInstrumentationServiceMappingElem{ + {Peer: "1.2.3.4", Service: "FooService"}, + {Peer: "2.3.4.5", Service: "BarService"}, + }, + }, + }, + Go: ExperimentalLanguageSpecificInstrumentation{ + "example": map[string]any{ + "property": "value", + }, + }, + Java: ExperimentalLanguageSpecificInstrumentation{ + "example": map[string]any{ + "property": "value", + }, + }, + Js: ExperimentalLanguageSpecificInstrumentation{ + "example": map[string]any{ + "property": "value", + }, + }, + Php: 
ExperimentalLanguageSpecificInstrumentation{ + "example": map[string]any{ + "property": "value", + }, + }, + Python: ExperimentalLanguageSpecificInstrumentation{ + "example": map[string]any{ + "property": "value", + }, + }, + Ruby: ExperimentalLanguageSpecificInstrumentation{ + "example": map[string]any{ + "property": "value", + }, + }, + Rust: ExperimentalLanguageSpecificInstrumentation{ + "example": map[string]any{ + "property": "value", + }, + }, + Swift: ExperimentalLanguageSpecificInstrumentation{ + "example": map[string]any{ + "property": "value", + }, + }, + }, + LogLevel: ptr("info"), + LoggerProvider: &LoggerProviderJson{ + LoggerConfiguratorDevelopment: &ExperimentalLoggerConfigurator{ + DefaultConfig: &ExperimentalLoggerConfig{ + Disabled: ptr(true), + }, + Loggers: []ExperimentalLoggerMatcherAndConfig{ + { + Config: &ExperimentalLoggerConfig{ + Disabled: ptr(false), + }, + Name: ptr("io.opentelemetry.contrib.*"), + }, + }, + }, + Limits: &LogRecordLimits{ + AttributeCountLimit: ptr(128), + AttributeValueLengthLimit: ptr(4096), + }, + Processors: []LogRecordProcessor{ + { + Batch: &BatchLogRecordProcessor{ + ExportTimeout: ptr(30000), + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + CertificateFile: ptr("/app/cert.pem"), + ClientCertificateFile: ptr("/app/cert.pem"), + ClientKeyFile: ptr("/app/cert.pem"), + Compression: ptr("gzip"), + Encoding: ptr(OTLPHttpEncodingProtobuf), + Endpoint: ptr("http://localhost:4318/v1/logs"), + Headers: []NameStringValuePair{ + {Name: "api-key", Value: ptr("1234")}, + }, + HeadersList: ptr("api-key=1234"), + Timeout: ptr(10000), + }, + }, + MaxExportBatchSize: ptr(512), + MaxQueueSize: ptr(2048), + ScheduleDelay: ptr(5000), + }, + }, + { + Batch: &BatchLogRecordProcessor{ + Exporter: LogRecordExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + CertificateFile: ptr("/app/cert.pem"), + ClientCertificateFile: ptr("/app/cert.pem"), + ClientKeyFile: ptr("/app/cert.pem"), + Compression: ptr("gzip"), + Endpoint: 
ptr("http://localhost:4317"), + Headers: []NameStringValuePair{ + {Name: "api-key", Value: ptr("1234")}, + }, + HeadersList: ptr("api-key=1234"), + Timeout: ptr(10000), + Insecure: ptr(false), + }, + }, + }, + }, + { + Batch: &BatchLogRecordProcessor{ + Exporter: LogRecordExporter{ + OTLPFileDevelopment: &ExperimentalOTLPFileExporter{ + OutputStream: ptr("file:///var/log/logs.jsonl"), + }, + }, + }, + }, + { + Batch: &BatchLogRecordProcessor{ + Exporter: LogRecordExporter{ + OTLPFileDevelopment: &ExperimentalOTLPFileExporter{ + OutputStream: ptr("stdout"), + }, + }, + }, + }, + { + Simple: &SimpleLogRecordProcessor{ + Exporter: LogRecordExporter{ + Console: ConsoleExporter{}, + }, + }, + }, + }, + }, + MeterProvider: &MeterProviderJson{ + ExemplarFilter: ptr(ExemplarFilter("trace_based")), + MeterConfiguratorDevelopment: &ExperimentalMeterConfigurator{ + DefaultConfig: &ExperimentalMeterConfig{ + Disabled: ptr(true), + }, + Meters: []ExperimentalMeterMatcherAndConfig{ + { + Config: &ExperimentalMeterConfig{ + Disabled: ptr(false), + }, + Name: ptr("io.opentelemetry.contrib.*"), + }, + }, + }, + Readers: []MetricReader{ + { + Pull: &PullMetricReader{ + Producers: []MetricProducer{ + { + Opencensus: OpenCensusMetricProducer{}, + }, + }, + CardinalityLimits: &CardinalityLimits{ + Default: ptr(2000), + Counter: ptr(2000), + Gauge: ptr(2000), + Histogram: ptr(2000), + ObservableCounter: ptr(2000), + ObservableGauge: ptr(2000), + ObservableUpDownCounter: ptr(2000), + UpDownCounter: ptr(2000), + }, + Exporter: PullMetricExporter{ + PrometheusDevelopment: &ExperimentalPrometheusMetricExporter{ + Host: ptr("localhost"), + Port: ptr(9464), + TranslationStrategy: ptr(ExperimentalPrometheusMetricExporterTranslationStrategyUnderscoreEscapingWithSuffixes), + WithResourceConstantLabels: &IncludeExclude{ + Excluded: []string{"service.attr1"}, + Included: []string{"service*"}, + }, + WithoutScopeInfo: ptr(false), + }, + }, + }, + }, + { + Periodic: &PeriodicMetricReader{ + 
Producers: []MetricProducer{ + { + AdditionalProperties: map[string]any{ + "prometheus": nil, + }, + }, + }, + CardinalityLimits: &CardinalityLimits{ + Default: ptr(2000), + Counter: ptr(2000), + Gauge: ptr(2000), + Histogram: ptr(2000), + ObservableCounter: ptr(2000), + ObservableGauge: ptr(2000), + ObservableUpDownCounter: ptr(2000), + UpDownCounter: ptr(2000), + }, + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + CertificateFile: ptr("/app/cert.pem"), + ClientCertificateFile: ptr("/app/cert.pem"), + ClientKeyFile: ptr("/app/cert.pem"), + Compression: ptr("gzip"), + DefaultHistogramAggregation: ptr(ExporterDefaultHistogramAggregationBase2ExponentialBucketHistogram), + Endpoint: ptr("http://localhost:4318/v1/metrics"), + Encoding: ptr(OTLPHttpEncodingProtobuf), + Headers: []NameStringValuePair{ + {Name: "api-key", Value: ptr("1234")}, + }, + HeadersList: ptr("api-key=1234"), + TemporalityPreference: ptr(ExporterTemporalityPreferenceDelta), + Timeout: ptr(10000), + }, + }, + Interval: ptr(60000), + Timeout: ptr(30000), + }, + }, + { + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + CertificateFile: ptr("/app/cert.pem"), + ClientCertificateFile: ptr("/app/cert.pem"), + ClientKeyFile: ptr("/app/cert.pem"), + Compression: ptr("gzip"), + DefaultHistogramAggregation: ptr(ExporterDefaultHistogramAggregationBase2ExponentialBucketHistogram), + Endpoint: ptr("http://localhost:4317"), + Headers: []NameStringValuePair{ + {Name: "api-key", Value: ptr("1234")}, + }, + HeadersList: ptr("api-key=1234"), + TemporalityPreference: ptr(ExporterTemporalityPreferenceDelta), + Timeout: ptr(10000), + Insecure: ptr(false), + }, + }, + }, + }, + { + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPFileDevelopment: &ExperimentalOTLPFileMetricExporter{ + OutputStream: ptr("file:///var/log/metrics.jsonl"), + DefaultHistogramAggregation: 
ptr(ExporterDefaultHistogramAggregationBase2ExponentialBucketHistogram), + TemporalityPreference: ptr(ExporterTemporalityPreferenceDelta), + }, + }, + }, + }, + { + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPFileDevelopment: &ExperimentalOTLPFileMetricExporter{ + OutputStream: ptr("stdout"), + DefaultHistogramAggregation: ptr(ExporterDefaultHistogramAggregationBase2ExponentialBucketHistogram), + TemporalityPreference: ptr(ExporterTemporalityPreferenceDelta), + }, + }, + }, + }, + { + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + Console: ConsoleExporter{}, + }, + }, + }, + }, + Views: []View{ + { + Selector: &ViewSelector{ + InstrumentName: ptr("my-instrument"), + InstrumentType: ptr(InstrumentTypeHistogram), + MeterName: ptr("my-meter"), + MeterSchemaUrl: ptr("https://opentelemetry.io/schemas/1.16.0"), + MeterVersion: ptr("1.0.0"), + Unit: ptr("ms"), + }, + Stream: &ViewStream{ + Aggregation: &Aggregation{ + ExplicitBucketHistogram: &ExplicitBucketHistogramAggregation{ + Boundaries: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000}, + RecordMinMax: ptr(true), + }, + }, + AggregationCardinalityLimit: ptr(2000), + AttributeKeys: &IncludeExclude{ + Included: []string{"key1", "key2"}, + Excluded: []string{"key3"}, + }, + Description: ptr("new_description"), + Name: ptr("new_instrument_name"), + }, + }, + }, + }, + Propagator: &PropagatorJson{ + Composite: []TextMapPropagator{ + { + Tracecontext: TraceContextPropagator{}, + }, + { + Baggage: BaggagePropagator{}, + }, + { + B3: B3Propagator{}, + }, + { + B3Multi: B3MultiPropagator{}, + }, + { + Jaeger: JaegerPropagator{}, + }, + { + Ottrace: OpenTracingPropagator{}, + }, + }, + CompositeList: ptr("tracecontext,baggage,b3,b3multi,jaeger,ottrace,xray"), + }, + Resource: &ResourceJson{ + Attributes: []AttributeNameValue{ + {Name: "service.name", Value: "unknown_service"}, + {Name: "string_key", Type: &AttributeType{Value: "string"}, Value: 
"value"}, + {Name: "bool_key", Type: &AttributeType{Value: "bool"}, Value: true}, + {Name: "int_key", Type: &AttributeType{Value: "int"}, Value: 1}, + {Name: "double_key", Type: &AttributeType{Value: "double"}, Value: 1.1}, + {Name: "string_array_key", Type: &AttributeType{Value: "string_array"}, Value: []any{"value1", "value2"}}, + {Name: "bool_array_key", Type: &AttributeType{Value: "bool_array"}, Value: []any{true, false}}, + {Name: "int_array_key", Type: &AttributeType{Value: "int_array"}, Value: []any{1, 2}}, + {Name: "double_array_key", Type: &AttributeType{Value: "double_array"}, Value: []any{1.1, 2.2}}, + }, + AttributesList: ptr("service.namespace=my-namespace,service.version=1.0.0"), + DetectionDevelopment: &ExperimentalResourceDetection{ + Attributes: &IncludeExclude{ + Excluded: []string{"process.command_args"}, + Included: []string{"process.*"}, + }, + // TODO: implement resource detectors + // Detectors: []ExperimentalResourceDetector{} + // }, + }, + SchemaUrl: ptr("https://opentelemetry.io/schemas/1.16.0"), + }, + TracerProvider: &TracerProviderJson{ + TracerConfiguratorDevelopment: &ExperimentalTracerConfigurator{ + DefaultConfig: &ExperimentalTracerConfig{ + Disabled: ptr(true), + }, + Tracers: []ExperimentalTracerMatcherAndConfig{ + { + Config: ptr(ExperimentalTracerConfig{ + Disabled: ptr(false), + }), + Name: ptr("io.opentelemetry.contrib.*"), + }, + }, + }, + + Limits: &SpanLimits{ + AttributeCountLimit: ptr(128), + AttributeValueLengthLimit: ptr(4096), + EventCountLimit: ptr(128), + EventAttributeCountLimit: ptr(128), + LinkCountLimit: ptr(128), + LinkAttributeCountLimit: ptr(128), + }, + Processors: []SpanProcessor{ + { + Batch: &BatchSpanProcessor{ + ExportTimeout: ptr(30000), + Exporter: SpanExporter{ + OTLPHttp: &OTLPHttpExporter{ + CertificateFile: ptr("/app/cert.pem"), + ClientCertificateFile: ptr("/app/cert.pem"), + ClientKeyFile: ptr("/app/cert.pem"), + Compression: ptr("gzip"), + Encoding: ptr(OTLPHttpEncodingProtobuf), + Endpoint: 
ptr("http://localhost:4318/v1/traces"), + Headers: []NameStringValuePair{ + {Name: "api-key", Value: ptr("1234")}, + }, + HeadersList: ptr("api-key=1234"), + Timeout: ptr(10000), + }, + }, + MaxExportBatchSize: ptr(512), + MaxQueueSize: ptr(2048), + ScheduleDelay: ptr(5000), + }, + }, + { + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + CertificateFile: ptr("/app/cert.pem"), + ClientCertificateFile: ptr("/app/cert.pem"), + ClientKeyFile: ptr("/app/cert.pem"), + Compression: ptr("gzip"), + Endpoint: ptr("http://localhost:4317"), + Headers: []NameStringValuePair{ + {Name: "api-key", Value: ptr("1234")}, + }, + HeadersList: ptr("api-key=1234"), + Timeout: ptr(10000), + Insecure: ptr(false), + }, + }, + }, + }, + { + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + OTLPFileDevelopment: &ExperimentalOTLPFileExporter{ + OutputStream: ptr("file:///var/log/traces.jsonl"), + }, + }, + }, + }, + { + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + OTLPFileDevelopment: &ExperimentalOTLPFileExporter{ + OutputStream: ptr("stdout"), + }, + }, + }, + }, + { + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + Zipkin: &ZipkinSpanExporter{ + Endpoint: ptr("http://localhost:9411/api/v2/spans"), + Timeout: ptr(10000), + }, + }, + }, + }, + { + Simple: &SimpleSpanProcessor{ + Exporter: SpanExporter{ + Console: ConsoleExporter{}, + }, + }, + }, + }, + Sampler: &Sampler{ + ParentBased: &ParentBasedSampler{ + LocalParentNotSampled: &Sampler{ + AlwaysOff: AlwaysOffSampler{}, + }, + LocalParentSampled: &Sampler{ + AlwaysOn: AlwaysOnSampler{}, + }, + RemoteParentNotSampled: &Sampler{ + AlwaysOff: AlwaysOffSampler{}, + }, + RemoteParentSampled: &Sampler{ + AlwaysOn: AlwaysOnSampler{}, + }, + Root: &Sampler{ + TraceIDRatioBased: &TraceIDRatioBasedSampler{ + Ratio: ptr(0.0001), + }, + }, + }, + }, + }, +} + +var v100OpenTelemetryConfigEnvParsing = OpenTelemetryConfiguration{ + Disabled: ptr(false), + FileFormat: "1.0", + LogLevel: 
ptr("info"), + AttributeLimits: &AttributeLimits{ + AttributeCountLimit: ptr(128), + AttributeValueLengthLimit: ptr(4096), + }, + Resource: &ResourceJson{ + Attributes: []AttributeNameValue{ + {Name: "service.name", Value: "unknown_service"}, + {Name: "string_key", Type: &AttributeType{Value: "string"}, Value: "value"}, + {Name: "bool_key", Type: &AttributeType{Value: "bool"}, Value: true}, + {Name: "int_key", Type: &AttributeType{Value: "int"}, Value: 1}, + {Name: "double_key", Type: &AttributeType{Value: "double"}, Value: 1.1}, + {Name: "string_array_key", Type: &AttributeType{Value: "string_array"}, Value: []any{"value1", "value2"}}, + {Name: "bool_array_key", Type: &AttributeType{Value: "bool_array"}, Value: []any{true, false}}, + {Name: "int_array_key", Type: &AttributeType{Value: "int_array"}, Value: []any{1, 2}}, + {Name: "double_array_key", Type: &AttributeType{Value: "double_array"}, Value: []any{1.1, 2.2}}, + {Name: "string_value", Type: &AttributeType{Value: "string"}, Value: "value"}, + {Name: "bool_value", Type: &AttributeType{Value: "bool"}, Value: true}, + {Name: "int_value", Type: &AttributeType{Value: "int"}, Value: 1}, + {Name: "float_value", Type: &AttributeType{Value: "double"}, Value: 1.1}, + {Name: "hex_value", Type: &AttributeType{Value: "int"}, Value: int(48879)}, + {Name: "quoted_string_value", Type: &AttributeType{Value: "string"}, Value: "value"}, + {Name: "quoted_bool_value", Type: &AttributeType{Value: "string"}, Value: "true"}, + {Name: "quoted_int_value", Type: &AttributeType{Value: "string"}, Value: "1"}, + {Name: "quoted_float_value", Type: &AttributeType{Value: "string"}, Value: "1.1"}, + {Name: "quoted_hex_value", Type: &AttributeType{Value: "string"}, Value: "0xbeef"}, + {Name: "alternative_env_syntax", Type: &AttributeType{Value: "string"}, Value: "value"}, + {Name: "invalid_map_value", Type: &AttributeType{Value: "string"}, Value: "value\nkey:value"}, + {Name: "multiple_references_inject", Type: &AttributeType{Value: "string"}, 
Value: "foo value 1.1"}, + {Name: "undefined_key", Type: &AttributeType{Value: "string"}, Value: nil}, + {Name: "undefined_key_fallback", Type: &AttributeType{Value: "string"}, Value: "fallback"}, + {Name: "env_var_in_key", Type: &AttributeType{Value: "string"}, Value: "value"}, + {Name: "replace_me", Type: &AttributeType{Value: "string"}, Value: "${DO_NOT_REPLACE_ME}"}, + {Name: "undefined_defaults_to_var", Type: &AttributeType{Value: "string"}, Value: "${STRING_VALUE}"}, + {Name: "escaped_does_not_substitute", Type: &AttributeType{Value: "string"}, Value: "${STRING_VALUE}"}, + {Name: "escaped_does_not_substitute_fallback", Type: &AttributeType{Value: "string"}, Value: "${STRING_VALUE:-fallback}"}, + {Name: "escaped_and_substituted_fallback", Type: &AttributeType{Value: "string"}, Value: "${STRING_VALUE:-value}"}, + {Name: "escaped_and_substituted", Type: &AttributeType{Value: "string"}, Value: "$value"}, + {Name: "multiple_escaped_and_not_substituted", Type: &AttributeType{Value: "string"}, Value: "$${STRING_VALUE}"}, + {Name: "undefined_key_with_escape_sequence_in_fallback", Type: &AttributeType{Value: "string"}, Value: "${UNDEFINED_KEY}"}, + {Name: "value_with_escape", Type: &AttributeType{Value: "string"}, Value: "value$$"}, + {Name: "escape_sequence", Type: &AttributeType{Value: "string"}, Value: "a $ b"}, + {Name: "no_escape_sequence", Type: &AttributeType{Value: "string"}, Value: "a $ b"}, + }, + AttributesList: ptr("service.namespace=my-namespace,service.version=1.0.0"), + // Detectors: &Detectors{ + // Attributes: &DetectorsAttributes{ + // Excluded: []string{"process.command_args"}, + // Included: []string{"process.*"}, + // }, + // }, + SchemaUrl: ptr("https://opentelemetry.io/schemas/1.16.0"), + }, +} + +func TestParseFiles(t *testing.T) { + tests := []struct { + name string + input string + wantErr error + wantType *OpenTelemetryConfiguration + }{ + { + name: "invalid nil name", + input: "v1.0.0_invalid_nil_name", + wantErr: 
newErrRequired(&NameStringValuePair{}, "name"), + wantType: &OpenTelemetryConfiguration{}, + }, + { + name: "invalid nil value", + input: "v1.0.0_invalid_nil_value", + wantErr: newErrRequired(&NameStringValuePair{}, "value"), + wantType: &OpenTelemetryConfiguration{}, + }, + { + name: "valid v0.2 config", + input: "v0.2", + wantErr: newErrUnmarshal(&OpenTelemetryConfiguration{}), + wantType: &OpenTelemetryConfiguration{}, + }, + { + name: "valid v0.3 config", + input: "v0.3", + wantErr: newErrUnmarshal(&TextMapPropagator{}), + wantType: &OpenTelemetryConfiguration{}, + }, + { + name: "valid v1.0.0 config", + input: "v1.0.0", + wantType: &v10OpenTelemetryConfig, + }, + } + + for _, tt := range tests { + t.Run("yaml:"+tt.name, func(t *testing.T) { + b, err := os.ReadFile(filepath.Join("testdata", fmt.Sprintf("%s.yaml", tt.input))) + require.NoError(t, err) + + got, err := ParseYAML(b) + require.ErrorIs(t, err, tt.wantErr) + if tt.wantErr == nil { + assert.Equal(t, tt.wantType, got) + } + }) + t.Run("json: "+tt.name, func(t *testing.T) { + b, err := os.ReadFile(filepath.Join("testdata", fmt.Sprintf("%s.json", tt.input))) + require.NoError(t, err) + + var got OpenTelemetryConfiguration + err = json.Unmarshal(b, &got) + require.ErrorIs(t, err, tt.wantErr) + assert.Equal(t, tt.wantType, &got) + }) + } +} + +func TestUnmarshalOpenTelemetryConfiguration(t *testing.T) { + tests := []struct { + name string + jsonConfig []byte + yamlConfig []byte + wantErr error + wantType OpenTelemetryConfiguration + }{ + { + name: "valid defaults config", + jsonConfig: []byte(`{"file_format": "1.0"}`), + yamlConfig: []byte("file_format: 1.0"), + wantType: OpenTelemetryConfiguration{ + Disabled: ptr(false), + FileFormat: "1.0", + LogLevel: ptr("info"), + }, + }, + { + name: "invalid config missing required file_format", + jsonConfig: []byte(`{"disabled": false}`), + yamlConfig: []byte("disabled: false"), + wantErr: newErrRequired(&OpenTelemetryConfiguration{}, "file_format"), + }, + { + 
name: "file_format invalid", + jsonConfig: []byte(`{"file_format":[], "disabled": false}`), + yamlConfig: []byte("file_format: []\ndisabled: false"), + wantErr: newErrUnmarshal(&OpenTelemetryConfiguration{}), + }, + { + name: "invalid config", + jsonConfig: []byte(`{"file_format": "yaml", "disabled": "notabool"}`), + yamlConfig: []byte("file_format: []\ndisabled: notabool"), + wantErr: newErrUnmarshal(&OpenTelemetryConfiguration{}), + }, + { + name: "invalid data", + jsonConfig: []byte(`{:2000}`), + yamlConfig: []byte("disabled: []\nconsole: {}\nfile_format: str"), + wantErr: newErrUnmarshal(&OpenTelemetryConfiguration{}), + }, + { + name: "resource invalid", + jsonConfig: []byte(`{"resource":[], "file_format": "1.0"}`), + yamlConfig: []byte("resource: []\nfile_format: 1.0"), + wantErr: newErrUnmarshal(&OpenTelemetryConfiguration{}), + }, + { + name: "attribute_limits invalid", + jsonConfig: []byte(`{"attribute_limits":[], "file_format": "1.0"}`), + yamlConfig: []byte("attribute_limits: []\nfile_format: 1.0"), + wantErr: newErrUnmarshal(&OpenTelemetryConfiguration{}), + }, + { + name: "instrumentation invalid", + jsonConfig: []byte(`{"instrumentation/development":[], "file_format": "1.0"}`), + yamlConfig: []byte("instrumentation/development: []\nfile_format: 1.0"), + wantErr: newErrUnmarshal(&OpenTelemetryConfiguration{}), + }, + { + name: "log_level invalid", + jsonConfig: []byte(`{"log_level":[], "file_format": "1.0"}`), + yamlConfig: []byte("log_level: []\nfile_format: 1.0"), + wantErr: newErrUnmarshal(&OpenTelemetryConfiguration{}), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := OpenTelemetryConfiguration{} + err := got.UnmarshalJSON(tt.jsonConfig) + assert.ErrorIs(t, err, tt.wantErr) + assert.Equal(t, tt.wantType, got) + + got = OpenTelemetryConfiguration{} + err = yaml.Unmarshal(tt.yamlConfig, &got) + assert.ErrorIs(t, err, tt.wantErr) + assert.Equal(t, tt.wantType, got) + }) + } +} + func 
TestUnmarshalBatchSpanProcessor(t *testing.T) { for _, tt := range []struct { name string @@ -440,6 +1263,49 @@ func TestUnmarshalBatchSpanProcessor(t *testing.T) { } } +func TestParseYAMLWithEnvironmentVariables(t *testing.T) { + tests := []struct { + name string + input string + wantErr error + wantType any + }{ + { + name: "valid v1.0.0 config with env vars", + input: "v1.0.0_env_var.yaml", + wantType: &v100OpenTelemetryConfigEnvParsing, + }, + } + + t.Setenv("OTEL_SDK_DISABLED", "false") + t.Setenv("OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT", "4096") + t.Setenv("OTEL_EXPORTER_OTLP_PROTOCOL", "http/protobuf") + t.Setenv("STRING_VALUE", "value") + t.Setenv("BOOL_VALUE", "true") + t.Setenv("INT_VALUE", "1") + t.Setenv("FLOAT_VALUE", "1.1") + t.Setenv("HEX_VALUE", "0xbeef") // A valid integer value (i.e. 3735928559) written in hexadecimal + t.Setenv("INVALID_MAP_VALUE", "value\\nkey:value") // An invalid attempt to inject a map key into the YAML + t.Setenv("ENV_VAR_IN_KEY", "env_var_in_key") // An env var in key + t.Setenv("DO_NOT_REPLACE_ME", "Never use this value") // An unused environment variable + t.Setenv("REPLACE_ME", "${DO_NOT_REPLACE_ME}") // A valid replacement text, used verbatim, not replaced with "Never use this value" + t.Setenv("VALUE_WITH_ESCAPE", "value$$") // A valid replacement text, used verbatim, not replaced with "Never use this value" + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b, err := os.ReadFile(filepath.Join("testdata", tt.input)) + require.NoError(t, err) + + got, err := ParseYAML(b) + if tt.wantErr != nil { + require.Equal(t, tt.wantErr.Error(), err.Error()) + } else { + require.NoError(t, err) + assert.Equal(t, tt.wantType, got) + } + }) + } +} + func TestUnmarshalPeriodicMetricReader(t *testing.T) { for _, tt := range []struct { name string diff --git a/otelconf/config_yaml.go b/otelconf/config_yaml.go index 4e8bab5b282..e116dab207c 100644 --- a/otelconf/config_yaml.go +++ b/otelconf/config_yaml.go @@ -41,6 +41,74 @@ 
func (j *PushMetricExporter) UnmarshalYAML(node *yaml.Node) error { return nil } +// UnmarshalYAML implements yaml.Unmarshaler. +func (j *OpenTelemetryConfiguration) UnmarshalYAML(node *yaml.Node) error { + if !hasYAMLMapKey(node, "file_format") { + return newErrRequired(j, "file_format") + } + type Plain OpenTelemetryConfiguration + type shadow struct { + Plain + LogLevel *string `yaml:"log_level,omitempty"` + AttributeLimits *AttributeLimits `yaml:"attribute_limits,omitempty"` + Disabled *bool `yaml:"disabled,omitempty"` + FileFormat string `yaml:"file_format"` + LoggerProvider *LoggerProviderJson `yaml:"logger_provider,omitempty"` + MeterProvider *MeterProviderJson `yaml:"meter_provider,omitempty"` + TracerProvider *TracerProviderJson `yaml:"tracer_provider,omitempty"` + Propagator *PropagatorJson `yaml:"propagator,omitempty"` + Resource *ResourceJson `yaml:"resource,omitempty"` + InstrumentationDevelopment *InstrumentationJson `yaml:"instrumentation/development"` + } + var sh shadow + + if err := node.Decode(&sh); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + + if sh.AttributeLimits != nil { + sh.Plain.AttributeLimits = sh.AttributeLimits + } + + sh.Plain.FileFormat = sh.FileFormat + if sh.Disabled != nil { + sh.Plain.Disabled = sh.Disabled + } else { + // Configure the log level of the internal logger used by the SDK. + // If omitted, info is used. 
+ sh.Plain.Disabled = ptr(false) + } + if sh.LoggerProvider != nil { + sh.Plain.LoggerProvider = sh.LoggerProvider + } + if sh.MeterProvider != nil { + sh.Plain.MeterProvider = sh.MeterProvider + } + if sh.TracerProvider != nil { + sh.Plain.TracerProvider = sh.TracerProvider + } + if sh.Propagator != nil { + sh.Plain.Propagator = sh.Propagator + } + if sh.Resource != nil { + sh.Plain.Resource = sh.Resource + } + if sh.InstrumentationDevelopment != nil { + sh.Plain.InstrumentationDevelopment = sh.InstrumentationDevelopment + } + + if sh.LogLevel != nil { + sh.Plain.LogLevel = sh.LogLevel + } else { + // Configure the log level of the internal logger used by the SDK. + // If omitted, info is used. + sh.Plain.LogLevel = ptr("info") + } + + *j = OpenTelemetryConfiguration(sh.Plain) + return nil +} + // UnmarshalYAML implements yaml.Unmarshaler. func (j *SpanExporter) UnmarshalYAML(node *yaml.Node) error { type Plain SpanExporter @@ -123,6 +191,38 @@ func (j *BatchLogRecordProcessor) UnmarshalYAML(node *yaml.Node) error { return nil } +// UnmarshalYAML implements yaml.Unmarshaler. +func (j *Sampler) UnmarshalYAML(node *yaml.Node) error { + var raw map[string]any + if err := node.Decode(&raw); err != nil { + return err + } + type Plain Sampler + var plain Plain + if err := node.Decode(&plain); err != nil { + return err + } + unmarshalSamplerTypes(raw, (*Sampler)(&plain)) + *j = Sampler(plain) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (j *MetricProducer) UnmarshalYAML(node *yaml.Node) error { + var raw map[string]any + if err := node.Decode(&raw); err != nil { + return err + } + type Plain MetricProducer + var plain Plain + if err := node.Decode(&plain); err != nil { + return err + } + unmarshalMetricProducer(raw, (*MetricProducer)(&plain)) + *j = MetricProducer(plain) + return nil +} + // UnmarshalYAML implements yaml.Unmarshaler. 
func (j *BatchSpanProcessor) UnmarshalYAML(node *yaml.Node) error { if !hasYAMLMapKey(node, "exporter") { @@ -443,3 +543,14 @@ func (j *PullMetricReader) UnmarshalYAML(node *yaml.Node) error { *j = PullMetricReader(plain) return nil } + +// UnmarshalYAML implements yaml.Unmarshaler. +func (j *ExperimentalLanguageSpecificInstrumentation) UnmarshalYAML(unmarshal func(any) error) error { + var raw map[string]any + if err := unmarshal(&raw); err != nil { + return err + } + + *j = raw + return nil +} diff --git a/otelconf/example_test.go b/otelconf/example_test.go index 8cfdc0c39eb..0d9b67e67f6 100644 --- a/otelconf/example_test.go +++ b/otelconf/example_test.go @@ -12,11 +12,11 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/log/global" - otelconf "go.opentelemetry.io/contrib/otelconf/v0.3.0" + "go.opentelemetry.io/contrib/otelconf" ) func Example() { - b, err := os.ReadFile(filepath.Join("testdata", "v0.3.yaml")) + b, err := os.ReadFile(filepath.Join("testdata", "v1.0.0.yaml")) if err != nil { log.Fatal(err) } diff --git a/otelconf/fuzz_test.go b/otelconf/fuzz_test.go new file mode 100644 index 00000000000..e1c32b48b72 --- /dev/null +++ b/otelconf/fuzz_test.go @@ -0,0 +1,64 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelconf + +import ( + "context" + "encoding/json" + "os" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func FuzzJSON(f *testing.F) { + b, err := os.ReadFile(filepath.Join("testdata", "v1.0.0.json")) + require.NoError(f, err) + f.Add(b) + + f.Fuzz(func(t *testing.T, data []byte) { + t.Log("JSON:\n" + string(data)) + + var cfg OpenTelemetryConfiguration + err := json.Unmarshal(data, &cfg) + if err != nil { + return + } + + sdk, err := NewSDK(WithOpenTelemetryConfiguration(cfg)) + if err != nil { + return + } + + ctx, cancel := context.WithTimeout(t.Context(), time.Millisecond) + defer cancel() + _ = sdk.Shutdown(ctx) + }) +} + +func FuzzYAML(f 
*testing.F) { + b, err := os.ReadFile(filepath.Join("testdata", "v1.0.0.yaml")) + require.NoError(f, err) + f.Add(b) + + f.Fuzz(func(t *testing.T, data []byte) { + t.Log("YAML:\n" + string(data)) + + cfg, err := ParseYAML(data) + if err != nil { + return + } + + sdk, err := NewSDK(WithOpenTelemetryConfiguration(*cfg)) + if err != nil { + return + } + + ctx, cancel := context.WithTimeout(t.Context(), time.Millisecond) + defer cancel() + _ = sdk.Shutdown(ctx) + }) +} diff --git a/otelconf/generated_config.go b/otelconf/generated_config.go index 6c508e9b030..0b79250a1cd 100644 --- a/otelconf/generated_config.go +++ b/otelconf/generated_config.go @@ -147,27 +147,27 @@ const ExemplarFilterAlwaysOff ExemplarFilter = "always_off" const ExemplarFilterAlwaysOn ExemplarFilter = "always_on" const ExemplarFilterTraceBased ExemplarFilter = "trace_based" -type ExperimentalContainerResourceDetector map[string]interface{} +type experimentalContainerResourceDetector map[string]interface{} -type ExperimentalGeneralInstrumentation struct { +type experimentalGeneralInstrumentation struct { // Http corresponds to the JSON schema field "http". - Http *ExperimentalHttpInstrumentation `json:"http,omitempty" yaml:"http,omitempty" mapstructure:"http,omitempty"` + Http *experimentalHttpInstrumentation `json:"http,omitempty" yaml:"http,omitempty" mapstructure:"http,omitempty"` // Peer corresponds to the JSON schema field "peer". - Peer *ExperimentalPeerInstrumentation `json:"peer,omitempty" yaml:"peer,omitempty" mapstructure:"peer,omitempty"` + Peer *experimentalPeerInstrumentation `json:"peer,omitempty" yaml:"peer,omitempty" mapstructure:"peer,omitempty"` } -type ExperimentalHostResourceDetector map[string]interface{} +type experimentalHostResourceDetector map[string]interface{} -type ExperimentalHttpInstrumentation struct { +type experimentalHttpInstrumentation struct { // Client corresponds to the JSON schema field "client". 
- Client *ExperimentalHttpInstrumentationClient `json:"client,omitempty" yaml:"client,omitempty" mapstructure:"client,omitempty"` + Client *experimentalHttpInstrumentationClient `json:"client,omitempty" yaml:"client,omitempty" mapstructure:"client,omitempty"` // Server corresponds to the JSON schema field "server". - Server *ExperimentalHttpInstrumentationServer `json:"server,omitempty" yaml:"server,omitempty" mapstructure:"server,omitempty"` + Server *experimentalHttpInstrumentationServer `json:"server,omitempty" yaml:"server,omitempty" mapstructure:"server,omitempty"` } -type ExperimentalHttpInstrumentationClient struct { +type experimentalHttpInstrumentationClient struct { // RequestCapturedHeaders corresponds to the JSON schema field // "request_captured_headers". RequestCapturedHeaders []string `json:"request_captured_headers,omitempty" yaml:"request_captured_headers,omitempty" mapstructure:"request_captured_headers,omitempty"` @@ -177,7 +177,7 @@ type ExperimentalHttpInstrumentationClient struct { ResponseCapturedHeaders []string `json:"response_captured_headers,omitempty" yaml:"response_captured_headers,omitempty" mapstructure:"response_captured_headers,omitempty"` } -type ExperimentalHttpInstrumentationServer struct { +type experimentalHttpInstrumentationServer struct { // RequestCapturedHeaders corresponds to the JSON schema field // "request_captured_headers". 
RequestCapturedHeaders []string `json:"request_captured_headers,omitempty" yaml:"request_captured_headers,omitempty" mapstructure:"request_captured_headers,omitempty"` @@ -187,56 +187,56 @@ type ExperimentalHttpInstrumentationServer struct { ResponseCapturedHeaders []string `json:"response_captured_headers,omitempty" yaml:"response_captured_headers,omitempty" mapstructure:"response_captured_headers,omitempty"` } -type ExperimentalLanguageSpecificInstrumentation map[string]interface{} +type experimentalLanguageSpecificInstrumentation map[string]interface{} -type ExperimentalLoggerConfig struct { +type experimentalLoggerConfig struct { // Disabled corresponds to the JSON schema field "disabled". Disabled *bool `json:"disabled,omitempty" yaml:"disabled,omitempty" mapstructure:"disabled,omitempty"` } -type ExperimentalLoggerConfigurator struct { +type experimentalLoggerConfigurator struct { // DefaultConfig corresponds to the JSON schema field "default_config". - DefaultConfig *ExperimentalLoggerConfig `json:"default_config,omitempty" yaml:"default_config,omitempty" mapstructure:"default_config,omitempty"` + DefaultConfig *experimentalLoggerConfig `json:"default_config,omitempty" yaml:"default_config,omitempty" mapstructure:"default_config,omitempty"` // Loggers corresponds to the JSON schema field "loggers". - Loggers []ExperimentalLoggerMatcherAndConfig `json:"loggers,omitempty" yaml:"loggers,omitempty" mapstructure:"loggers,omitempty"` + Loggers []experimentalLoggerMatcherAndConfig `json:"loggers,omitempty" yaml:"loggers,omitempty" mapstructure:"loggers,omitempty"` } -type ExperimentalLoggerMatcherAndConfig struct { +type experimentalLoggerMatcherAndConfig struct { // Config corresponds to the JSON schema field "config". 
- Config *ExperimentalLoggerConfig `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"` + Config *experimentalLoggerConfig `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"` // Name corresponds to the JSON schema field "name". Name *string `json:"name,omitempty" yaml:"name,omitempty" mapstructure:"name,omitempty"` } -type ExperimentalMeterConfig struct { +type experimentalMeterConfig struct { // Disabled corresponds to the JSON schema field "disabled". Disabled *bool `json:"disabled,omitempty" yaml:"disabled,omitempty" mapstructure:"disabled,omitempty"` } -type ExperimentalMeterConfigurator struct { +type experimentalMeterConfigurator struct { // DefaultConfig corresponds to the JSON schema field "default_config". - DefaultConfig *ExperimentalMeterConfig `json:"default_config,omitempty" yaml:"default_config,omitempty" mapstructure:"default_config,omitempty"` + DefaultConfig *experimentalMeterConfig `json:"default_config,omitempty" yaml:"default_config,omitempty" mapstructure:"default_config,omitempty"` // Meters corresponds to the JSON schema field "meters". - Meters []ExperimentalMeterMatcherAndConfig `json:"meters,omitempty" yaml:"meters,omitempty" mapstructure:"meters,omitempty"` + Meters []experimentalMeterMatcherAndConfig `json:"meters,omitempty" yaml:"meters,omitempty" mapstructure:"meters,omitempty"` } -type ExperimentalMeterMatcherAndConfig struct { +type experimentalMeterMatcherAndConfig struct { // Config corresponds to the JSON schema field "config". - Config *ExperimentalMeterConfig `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"` + Config *experimentalMeterConfig `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"` // Name corresponds to the JSON schema field "name". 
Name *string `json:"name,omitempty" yaml:"name,omitempty" mapstructure:"name,omitempty"` } -type ExperimentalOTLPFileExporter struct { +type experimentalOTLPFileExporter struct { // OutputStream corresponds to the JSON schema field "output_stream". OutputStream *string `json:"output_stream,omitempty" yaml:"output_stream,omitempty" mapstructure:"output_stream,omitempty"` } -type ExperimentalOTLPFileMetricExporter struct { +type experimentalOTLPFileMetricExporter struct { // DefaultHistogramAggregation corresponds to the JSON schema field // "default_histogram_aggregation". DefaultHistogramAggregation *ExporterDefaultHistogramAggregation `json:"default_histogram_aggregation,omitempty" yaml:"default_histogram_aggregation,omitempty" mapstructure:"default_histogram_aggregation,omitempty"` @@ -249,12 +249,12 @@ type ExperimentalOTLPFileMetricExporter struct { TemporalityPreference *ExporterTemporalityPreference `json:"temporality_preference,omitempty" yaml:"temporality_preference,omitempty" mapstructure:"temporality_preference,omitempty"` } -type ExperimentalPeerInstrumentation struct { +type experimentalPeerInstrumentation struct { // ServiceMapping corresponds to the JSON schema field "service_mapping". - ServiceMapping []ExperimentalPeerInstrumentationServiceMappingElem `json:"service_mapping,omitempty" yaml:"service_mapping,omitempty" mapstructure:"service_mapping,omitempty"` + ServiceMapping []experimentalPeerInstrumentationServiceMappingElem `json:"service_mapping,omitempty" yaml:"service_mapping,omitempty" mapstructure:"service_mapping,omitempty"` } -type ExperimentalPeerInstrumentationServiceMappingElem struct { +type experimentalPeerInstrumentationServiceMappingElem struct { // Peer corresponds to the JSON schema field "peer". 
Peer string `json:"peer" yaml:"peer" mapstructure:"peer"` @@ -262,9 +262,9 @@ type ExperimentalPeerInstrumentationServiceMappingElem struct { Service string `json:"service" yaml:"service" mapstructure:"service"` } -type ExperimentalProcessResourceDetector map[string]interface{} +type experimentalProcessResourceDetector map[string]interface{} -type ExperimentalPrometheusMetricExporter struct { +type experimentalPrometheusMetricExporter struct { // Host corresponds to the JSON schema field "host". Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` @@ -273,7 +273,7 @@ type ExperimentalPrometheusMetricExporter struct { // TranslationStrategy corresponds to the JSON schema field // "translation_strategy". - TranslationStrategy *ExperimentalPrometheusMetricExporterTranslationStrategy `json:"translation_strategy,omitempty" yaml:"translation_strategy,omitempty" mapstructure:"translation_strategy,omitempty"` + TranslationStrategy *experimentalPrometheusMetricExporterTranslationStrategy `json:"translation_strategy,omitempty" yaml:"translation_strategy,omitempty" mapstructure:"translation_strategy,omitempty"` // WithResourceConstantLabels corresponds to the JSON schema field // "with_resource_constant_labels". 
@@ -283,55 +283,55 @@ type ExperimentalPrometheusMetricExporter struct { WithoutScopeInfo *bool `json:"without_scope_info,omitempty" yaml:"without_scope_info,omitempty" mapstructure:"without_scope_info,omitempty"` } -type ExperimentalPrometheusMetricExporterTranslationStrategy string +type experimentalPrometheusMetricExporterTranslationStrategy string -const ExperimentalPrometheusMetricExporterTranslationStrategyNoTranslation ExperimentalPrometheusMetricExporterTranslationStrategy = "NoTranslation" -const ExperimentalPrometheusMetricExporterTranslationStrategyNoUTF8EscapingWithSuffixes ExperimentalPrometheusMetricExporterTranslationStrategy = "NoUTF8EscapingWithSuffixes" -const ExperimentalPrometheusMetricExporterTranslationStrategyUnderscoreEscapingWithSuffixes ExperimentalPrometheusMetricExporterTranslationStrategy = "UnderscoreEscapingWithSuffixes" -const ExperimentalPrometheusMetricExporterTranslationStrategyUnderscoreEscapingWithoutSuffixes ExperimentalPrometheusMetricExporterTranslationStrategy = "UnderscoreEscapingWithoutSuffixes" +const experimentalPrometheusMetricExporterTranslationStrategyNoTranslation experimentalPrometheusMetricExporterTranslationStrategy = "NoTranslation" +const experimentalPrometheusMetricExporterTranslationStrategyNoUTF8EscapingWithSuffixes experimentalPrometheusMetricExporterTranslationStrategy = "NoUTF8EscapingWithSuffixes" +const experimentalPrometheusMetricExporterTranslationStrategyUnderscoreEscapingWithSuffixes experimentalPrometheusMetricExporterTranslationStrategy = "UnderscoreEscapingWithSuffixes" +const experimentalPrometheusMetricExporterTranslationStrategyUnderscoreEscapingWithoutSuffixes experimentalPrometheusMetricExporterTranslationStrategy = "UnderscoreEscapingWithoutSuffixes" -type ExperimentalResourceDetection struct { +type experimentalResourceDetection struct { // Attributes corresponds to the JSON schema field "attributes". 
Attributes *IncludeExclude `json:"attributes,omitempty" yaml:"attributes,omitempty" mapstructure:"attributes,omitempty"` // Detectors corresponds to the JSON schema field "detectors". - Detectors []ExperimentalResourceDetector `json:"detectors,omitempty" yaml:"detectors,omitempty" mapstructure:"detectors,omitempty"` + Detectors []experimentalResourceDetector `json:"detectors,omitempty" yaml:"detectors,omitempty" mapstructure:"detectors,omitempty"` } -type ExperimentalResourceDetector struct { +type experimentalResourceDetector struct { // Container corresponds to the JSON schema field "container". - Container ExperimentalContainerResourceDetector `json:"container,omitempty" yaml:"container,omitempty" mapstructure:"container,omitempty"` + Container experimentalContainerResourceDetector `json:"container,omitempty" yaml:"container,omitempty" mapstructure:"container,omitempty"` // Host corresponds to the JSON schema field "host". - Host ExperimentalHostResourceDetector `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + Host experimentalHostResourceDetector `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` // Process corresponds to the JSON schema field "process". - Process ExperimentalProcessResourceDetector `json:"process,omitempty" yaml:"process,omitempty" mapstructure:"process,omitempty"` + Process experimentalProcessResourceDetector `json:"process,omitempty" yaml:"process,omitempty" mapstructure:"process,omitempty"` // Service corresponds to the JSON schema field "service". 
- Service ExperimentalServiceResourceDetector `json:"service,omitempty" yaml:"service,omitempty" mapstructure:"service,omitempty"` + Service experimentalServiceResourceDetector `json:"service,omitempty" yaml:"service,omitempty" mapstructure:"service,omitempty"` AdditionalProperties interface{} `mapstructure:",remain"` } -type ExperimentalServiceResourceDetector map[string]interface{} +type experimentalServiceResourceDetector map[string]interface{} -type ExperimentalTracerConfig struct { +type experimentalTracerConfig struct { // Disabled corresponds to the JSON schema field "disabled". Disabled *bool `json:"disabled,omitempty" yaml:"disabled,omitempty" mapstructure:"disabled,omitempty"` } -type ExperimentalTracerConfigurator struct { +type experimentalTracerConfigurator struct { // DefaultConfig corresponds to the JSON schema field "default_config". - DefaultConfig *ExperimentalTracerConfig `json:"default_config,omitempty" yaml:"default_config,omitempty" mapstructure:"default_config,omitempty"` + DefaultConfig *experimentalTracerConfig `json:"default_config,omitempty" yaml:"default_config,omitempty" mapstructure:"default_config,omitempty"` // Tracers corresponds to the JSON schema field "tracers". - Tracers []ExperimentalTracerMatcherAndConfig `json:"tracers,omitempty" yaml:"tracers,omitempty" mapstructure:"tracers,omitempty"` + Tracers []experimentalTracerMatcherAndConfig `json:"tracers,omitempty" yaml:"tracers,omitempty" mapstructure:"tracers,omitempty"` } -type ExperimentalTracerMatcherAndConfig struct { +type experimentalTracerMatcherAndConfig struct { // Config corresponds to the JSON schema field "config". - Config *ExperimentalTracerConfig `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"` + Config *experimentalTracerConfig `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"` // Name corresponds to the JSON schema field "name". 
Name *string `json:"name,omitempty" yaml:"name,omitempty" mapstructure:"name,omitempty"` @@ -376,40 +376,40 @@ const InstrumentTypeUpDownCounter InstrumentType = "up_down_counter" type InstrumentationJson struct { // Cpp corresponds to the JSON schema field "cpp". - Cpp ExperimentalLanguageSpecificInstrumentation `json:"cpp,omitempty" yaml:"cpp,omitempty" mapstructure:"cpp,omitempty"` + Cpp experimentalLanguageSpecificInstrumentation `json:"cpp,omitempty" yaml:"cpp,omitempty" mapstructure:"cpp,omitempty"` // Dotnet corresponds to the JSON schema field "dotnet". - Dotnet ExperimentalLanguageSpecificInstrumentation `json:"dotnet,omitempty" yaml:"dotnet,omitempty" mapstructure:"dotnet,omitempty"` + Dotnet experimentalLanguageSpecificInstrumentation `json:"dotnet,omitempty" yaml:"dotnet,omitempty" mapstructure:"dotnet,omitempty"` // Erlang corresponds to the JSON schema field "erlang". - Erlang ExperimentalLanguageSpecificInstrumentation `json:"erlang,omitempty" yaml:"erlang,omitempty" mapstructure:"erlang,omitempty"` + Erlang experimentalLanguageSpecificInstrumentation `json:"erlang,omitempty" yaml:"erlang,omitempty" mapstructure:"erlang,omitempty"` // General corresponds to the JSON schema field "general". - General *ExperimentalGeneralInstrumentation `json:"general,omitempty" yaml:"general,omitempty" mapstructure:"general,omitempty"` + General *experimentalGeneralInstrumentation `json:"general,omitempty" yaml:"general,omitempty" mapstructure:"general,omitempty"` // Go corresponds to the JSON schema field "go". - Go ExperimentalLanguageSpecificInstrumentation `json:"go,omitempty" yaml:"go,omitempty" mapstructure:"go,omitempty"` + Go experimentalLanguageSpecificInstrumentation `json:"go,omitempty" yaml:"go,omitempty" mapstructure:"go,omitempty"` // Java corresponds to the JSON schema field "java". 
- Java ExperimentalLanguageSpecificInstrumentation `json:"java,omitempty" yaml:"java,omitempty" mapstructure:"java,omitempty"` + Java experimentalLanguageSpecificInstrumentation `json:"java,omitempty" yaml:"java,omitempty" mapstructure:"java,omitempty"` // Js corresponds to the JSON schema field "js". - Js ExperimentalLanguageSpecificInstrumentation `json:"js,omitempty" yaml:"js,omitempty" mapstructure:"js,omitempty"` + Js experimentalLanguageSpecificInstrumentation `json:"js,omitempty" yaml:"js,omitempty" mapstructure:"js,omitempty"` // Php corresponds to the JSON schema field "php". - Php ExperimentalLanguageSpecificInstrumentation `json:"php,omitempty" yaml:"php,omitempty" mapstructure:"php,omitempty"` + Php experimentalLanguageSpecificInstrumentation `json:"php,omitempty" yaml:"php,omitempty" mapstructure:"php,omitempty"` // Python corresponds to the JSON schema field "python". - Python ExperimentalLanguageSpecificInstrumentation `json:"python,omitempty" yaml:"python,omitempty" mapstructure:"python,omitempty"` + Python experimentalLanguageSpecificInstrumentation `json:"python,omitempty" yaml:"python,omitempty" mapstructure:"python,omitempty"` // Ruby corresponds to the JSON schema field "ruby". - Ruby ExperimentalLanguageSpecificInstrumentation `json:"ruby,omitempty" yaml:"ruby,omitempty" mapstructure:"ruby,omitempty"` + Ruby experimentalLanguageSpecificInstrumentation `json:"ruby,omitempty" yaml:"ruby,omitempty" mapstructure:"ruby,omitempty"` // Rust corresponds to the JSON schema field "rust". - Rust ExperimentalLanguageSpecificInstrumentation `json:"rust,omitempty" yaml:"rust,omitempty" mapstructure:"rust,omitempty"` + Rust experimentalLanguageSpecificInstrumentation `json:"rust,omitempty" yaml:"rust,omitempty" mapstructure:"rust,omitempty"` // Swift corresponds to the JSON schema field "swift". 
- Swift ExperimentalLanguageSpecificInstrumentation `json:"swift,omitempty" yaml:"swift,omitempty" mapstructure:"swift,omitempty"` + Swift experimentalLanguageSpecificInstrumentation `json:"swift,omitempty" yaml:"swift,omitempty" mapstructure:"swift,omitempty"` } type JaegerPropagator map[string]interface{} @@ -431,9 +431,9 @@ type LogRecordExporter struct { // Console corresponds to the JSON schema field "console". Console ConsoleExporter `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` - // OTLPFileDevelopment corresponds to the JSON schema field + // unexportedOTLPFileDevelopment corresponds to the JSON schema field // "otlp_file/development". - OTLPFileDevelopment *ExperimentalOTLPFileExporter `json:"otlp_file/development,omitempty" yaml:"otlp_file/development,omitempty" mapstructure:"otlp_file/development,omitempty"` + unexportedOTLPFileDevelopment *experimentalOTLPFileExporter `json:"otlp_file/development,omitempty" yaml:"otlp_file/development,omitempty" mapstructure:"otlp_file/development,omitempty"` // OTLPGrpc corresponds to the JSON schema field "otlp_grpc". OTLPGrpc *OTLPGrpcExporter `json:"otlp_grpc,omitempty" yaml:"otlp_grpc,omitempty" mapstructure:"otlp_grpc,omitempty"` @@ -468,9 +468,9 @@ type LoggerProviderJson struct { // Limits corresponds to the JSON schema field "limits". Limits *LogRecordLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` - // LoggerConfiguratorDevelopment corresponds to the JSON schema field + // unexportedLoggerConfiguratorDevelopment corresponds to the JSON schema field // "logger_configurator/development". 
- LoggerConfiguratorDevelopment *ExperimentalLoggerConfigurator `json:"logger_configurator/development,omitempty" yaml:"logger_configurator/development,omitempty" mapstructure:"logger_configurator/development,omitempty"` + unexportedLoggerConfiguratorDevelopment *experimentalLoggerConfigurator `json:"logger_configurator/development,omitempty" yaml:"logger_configurator/development,omitempty" mapstructure:"logger_configurator/development,omitempty"` // Processors corresponds to the JSON schema field "processors". Processors []LogRecordProcessor `json:"processors" yaml:"processors" mapstructure:"processors"` @@ -480,9 +480,9 @@ type MeterProviderJson struct { // ExemplarFilter corresponds to the JSON schema field "exemplar_filter". ExemplarFilter *ExemplarFilter `json:"exemplar_filter,omitempty" yaml:"exemplar_filter,omitempty" mapstructure:"exemplar_filter,omitempty"` - // MeterConfiguratorDevelopment corresponds to the JSON schema field + // unexportedMeterConfiguratorDevelopment corresponds to the JSON schema field // "meter_configurator/development". - MeterConfiguratorDevelopment *ExperimentalMeterConfigurator `json:"meter_configurator/development,omitempty" yaml:"meter_configurator/development,omitempty" mapstructure:"meter_configurator/development,omitempty"` + unexportedMeterConfiguratorDevelopment *experimentalMeterConfigurator `json:"meter_configurator/development,omitempty" yaml:"meter_configurator/development,omitempty" mapstructure:"meter_configurator/development,omitempty"` // Readers corresponds to the JSON schema field "readers". Readers []MetricReader `json:"readers" yaml:"readers" mapstructure:"readers"` @@ -667,9 +667,9 @@ type OpenTelemetryConfiguration struct { // FileFormat corresponds to the JSON schema field "file_format". 
FileFormat string `json:"file_format" yaml:"file_format" mapstructure:"file_format"` - // InstrumentationDevelopment corresponds to the JSON schema field + // unexportedInstrumentationDevelopment corresponds to the JSON schema field // "instrumentation/development". - InstrumentationDevelopment OpenTelemetryConfigurationInstrumentationDevelopment `json:"instrumentation/development,omitempty" yaml:"instrumentation/development,omitempty" mapstructure:"instrumentation/development,omitempty"` + unexportedInstrumentationDevelopment OpenTelemetryConfigurationInstrumentationDevelopment `json:"instrumentation/development,omitempty" yaml:"instrumentation/development,omitempty" mapstructure:"instrumentation/development,omitempty"` // LogLevel corresponds to the JSON schema field "log_level". LogLevel *string `json:"log_level,omitempty" yaml:"log_level,omitempty" mapstructure:"log_level,omitempty"` @@ -692,7 +692,7 @@ type OpenTelemetryConfiguration struct { AdditionalProperties interface{} `mapstructure:",remain"` } -type OpenTelemetryConfigurationInstrumentationDevelopment interface{} +type unexportedOpenTelemetryConfigurationInstrumentationDevelopment interface{} type OpenTelemetryConfigurationLoggerProvider interface{} @@ -752,9 +752,9 @@ type PropagatorJson struct { } type PullMetricExporter struct { - // PrometheusDevelopment corresponds to the JSON schema field + // unexportedPrometheusDevelopment corresponds to the JSON schema field // "prometheus/development". 
- PrometheusDevelopment *ExperimentalPrometheusMetricExporter `json:"prometheus/development,omitempty" yaml:"prometheus/development,omitempty" mapstructure:"prometheus/development,omitempty"` + unexportedPrometheusDevelopment *experimentalPrometheusMetricExporter `json:"prometheus/development,omitempty" yaml:"prometheus/development,omitempty" mapstructure:"prometheus/development,omitempty"` AdditionalProperties interface{} `mapstructure:",remain"` } @@ -774,9 +774,9 @@ type PushMetricExporter struct { // Console corresponds to the JSON schema field "console". Console ConsoleExporter `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` - // OTLPFileDevelopment corresponds to the JSON schema field + // unexportedOTLPFileDevelopment corresponds to the JSON schema field // "otlp_file/development". - OTLPFileDevelopment *ExperimentalOTLPFileMetricExporter `json:"otlp_file/development,omitempty" yaml:"otlp_file/development,omitempty" mapstructure:"otlp_file/development,omitempty"` + unexportedOTLPFileDevelopment *experimentalOTLPFileMetricExporter `json:"otlp_file/development,omitempty" yaml:"otlp_file/development,omitempty" mapstructure:"otlp_file/development,omitempty"` // OTLPGrpc corresponds to the JSON schema field "otlp_grpc". OTLPGrpc *OTLPGrpcMetricExporter `json:"otlp_grpc,omitempty" yaml:"otlp_grpc,omitempty" mapstructure:"otlp_grpc,omitempty"` @@ -794,9 +794,9 @@ type ResourceJson struct { // AttributesList corresponds to the JSON schema field "attributes_list". AttributesList *string `json:"attributes_list,omitempty" yaml:"attributes_list,omitempty" mapstructure:"attributes_list,omitempty"` - // DetectionDevelopment corresponds to the JSON schema field + // unexportedDetectionDevelopment corresponds to the JSON schema field // "detection/development". 
- DetectionDevelopment *ExperimentalResourceDetection `json:"detection/development,omitempty" yaml:"detection/development,omitempty" mapstructure:"detection/development,omitempty"` + unexportedDetectionDevelopment *experimentalResourceDetection `json:"detection/development,omitempty" yaml:"detection/development,omitempty" mapstructure:"detection/development,omitempty"` // SchemaUrl corresponds to the JSON schema field "schema_url". SchemaUrl *string `json:"schema_url,omitempty" yaml:"schema_url,omitempty" mapstructure:"schema_url,omitempty"` @@ -835,9 +835,9 @@ type SpanExporter struct { // Console corresponds to the JSON schema field "console". Console ConsoleExporter `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` - // OTLPFileDevelopment corresponds to the JSON schema field + // unexportedOTLPFileDevelopment corresponds to the JSON schema field // "otlp_file/development". - OTLPFileDevelopment *ExperimentalOTLPFileExporter `json:"otlp_file/development,omitempty" yaml:"otlp_file/development,omitempty" mapstructure:"otlp_file/development,omitempty"` + unexportedOTLPFileDevelopment *experimentalOTLPFileExporter `json:"otlp_file/development,omitempty" yaml:"otlp_file/development,omitempty" mapstructure:"otlp_file/development,omitempty"` // OTLPGrpc corresponds to the JSON schema field "otlp_grpc". OTLPGrpc *OTLPGrpcExporter `json:"otlp_grpc,omitempty" yaml:"otlp_grpc,omitempty" mapstructure:"otlp_grpc,omitempty"` @@ -926,9 +926,9 @@ type TracerProviderJson struct { // Sampler corresponds to the JSON schema field "sampler". Sampler *Sampler `json:"sampler,omitempty" yaml:"sampler,omitempty" mapstructure:"sampler,omitempty"` - // TracerConfiguratorDevelopment corresponds to the JSON schema field + // unexportedTracerConfiguratorDevelopment corresponds to the JSON schema field // "tracer_configurator/development". 
- TracerConfiguratorDevelopment *ExperimentalTracerConfigurator `json:"tracer_configurator/development,omitempty" yaml:"tracer_configurator/development,omitempty" mapstructure:"tracer_configurator/development,omitempty"` + unexportedTracerConfiguratorDevelopment *experimentalTracerConfigurator `json:"tracer_configurator/development,omitempty" yaml:"tracer_configurator/development,omitempty" mapstructure:"tracer_configurator/development,omitempty"` } type View struct { diff --git a/otelconf/go.mod b/otelconf/go.mod index f67373cec4e..ce3ac4f4188 100644 --- a/otelconf/go.mod +++ b/otelconf/go.mod @@ -4,6 +4,7 @@ go 1.24.0 require ( github.com/prometheus/client_golang v1.23.2 + github.com/prometheus/otlptranslator v1.0.0 github.com/stretchr/testify v1.11.1 go.opentelemetry.io/otel v1.38.0 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0 @@ -41,7 +42,6 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.67.3 // indirect - github.com/prometheus/otlptranslator v1.0.0 // indirect github.com/prometheus/procfs v0.19.2 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect diff --git a/otelconf/log_test.go b/otelconf/log_test.go index 9bb6119ed7a..11f72146c96 100644 --- a/otelconf/log_test.go +++ b/otelconf/log_test.go @@ -713,33 +713,27 @@ func TestLoggerProviderOptions(t *testing.T) { require.NoError(t, err) res := resource.NewSchemaless(attribute.String("foo", "bar")) - // TODO: re-enable this once NewSDK is added - // sdk, err := NewSDK( - // WithOpenTelemetryConfiguration(cfg), - // WithLoggerProviderOptions(sdklog.WithProcessor(sdklog.NewSimpleProcessor(stdoutlogExporter))), - // WithLoggerProviderOptions(sdklog.WithResource(res)), - // ) - lp, shutdown, err := loggerProvider(configOptions{ - opentelemetryConfig: cfg, - loggerProviderOptions: 
[]sdklog.LoggerProviderOption{sdklog.WithProcessor(sdklog.NewSimpleProcessor(stdoutlogExporter))}, - }, res) + sdk, err := NewSDK( + WithOpenTelemetryConfiguration(cfg), + WithLoggerProviderOptions(sdklog.WithProcessor(sdklog.NewSimpleProcessor(stdoutlogExporter))), + WithLoggerProviderOptions(sdklog.WithResource(res)), + ) require.NoError(t, err) defer func() { - assert.NoError(t, shutdown(t.Context())) + assert.NoError(t, sdk.Shutdown(t.Context())) }() // The exporter, which we passed in as an extra option to NewSDK, // should be wired up to the provider in addition to the // configuration-based OTLP exporter. - logger := lp.Logger("test") + logger := sdk.LoggerProvider().Logger("test") logger.Emit(t.Context(), log.Record{}) assert.NotZero(t, buf) assert.Equal(t, 1, calls) // Options provided by WithMeterProviderOptions may be overridden // by configuration, e.g. the resource is always defined via // configuration. - // TODO: re-enable this once NewSDK is added - // assert.NotContains(t, buf.String(), "foo") + assert.NotContains(t, buf.String(), "foo") } func Test_otlpGRPCLogExporter(t *testing.T) { diff --git a/otelconf/metric.go b/otelconf/metric.go index b6e12114401..338cb04a71f 100644 --- a/otelconf/metric.go +++ b/otelconf/metric.go @@ -18,6 +18,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/prometheus/otlptranslator" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" @@ -403,18 +404,24 @@ func prometheusReader(ctx context.Context, prometheusConfig *ExperimentalPrometh return readerWithServer{reader, &server}, nil } +func validTranslationStrategy(strategy ExperimentalPrometheusMetricExporterTranslationStrategy) bool { + return strategy == ExperimentalPrometheusMetricExporterTranslationStrategyNoTranslation || + strategy == 
ExperimentalPrometheusMetricExporterTranslationStrategyNoUTF8EscapingWithSuffixes || + strategy == ExperimentalPrometheusMetricExporterTranslationStrategyUnderscoreEscapingWithSuffixes || + strategy == ExperimentalPrometheusMetricExporterTranslationStrategyUnderscoreEscapingWithoutSuffixes +} + func prometheusReaderOpts(prometheusConfig *ExperimentalPrometheusMetricExporter) ([]otelprom.Option, error) { var opts []otelprom.Option if prometheusConfig.WithoutScopeInfo != nil && *prometheusConfig.WithoutScopeInfo { opts = append(opts, otelprom.WithoutScopeInfo()) } - // TODO: fix the following to use with translation strategy - // if prometheusConfig.WithoutTypeSuffix != nil && *prometheusConfig.WithoutTypeSuffix { - // opts = append(opts, otelprom.WithoutCounterSuffixes()) //nolint:staticcheck // WithouTypeSuffix is deprecated, but we still need it for backwards compatibility. - // } - // if prometheusConfig.WithoutUnits != nil && *prometheusConfig.WithoutUnits { - // opts = append(opts, otelprom.WithoutUnits()) //nolint:staticcheck // WithouTypeSuffix is deprecated, but we still need it for backwards compatibility. 
- // } + if prometheusConfig.TranslationStrategy != nil { + if !validTranslationStrategy(*prometheusConfig.TranslationStrategy) { + return nil, newErrInvalid("translation strategy invalid") + } + opts = append(opts, otelprom.WithTranslationStrategy(otlptranslator.TranslationStrategyOption(*prometheusConfig.TranslationStrategy))) + } if prometheusConfig.WithResourceConstantLabels != nil { f, err := newIncludeExcludeFilter(prometheusConfig.WithResourceConstantLabels) if err != nil { diff --git a/otelconf/metric_test.go b/otelconf/metric_test.go index 0a4b623a2ab..b07f6d14a69 100644 --- a/otelconf/metric_test.go +++ b/otelconf/metric_test.go @@ -139,19 +139,14 @@ func TestMeterProviderOptions(t *testing.T) { require.NoError(t, err) res := resource.NewSchemaless(attribute.String("foo", "bar")) - // TODO: re-enable this once NewSDK is added - // sdk, err := NewSDK( - // WithOpenTelemetryConfiguration(cfg), - // WithMeterProviderOptions(sdkmetric.WithReader(sdkmetric.NewPeriodicReader(stdoutmetricExporter))), - // WithMeterProviderOptions(sdkmetric.WithResource(res)), - // ) - mp, shutdown, err := meterProvider(configOptions{ - opentelemetryConfig: cfg, - meterProviderOptions: []sdkmetric.Option{sdkmetric.WithReader(sdkmetric.NewPeriodicReader(stdoutmetricExporter))}, - }, res) + sdk, err := NewSDK( + WithOpenTelemetryConfiguration(cfg), + WithMeterProviderOptions(sdkmetric.WithReader(sdkmetric.NewPeriodicReader(stdoutmetricExporter))), + WithMeterProviderOptions(sdkmetric.WithResource(res)), + ) require.NoError(t, err) defer func() { - assert.NoError(t, shutdown(t.Context())) + assert.NoError(t, sdk.Shutdown(t.Context())) // The exporter, which we passed in as an extra option to NewSDK, // should be wired up to the provider in addition to the // configuration-based OTLP exporter. @@ -161,11 +156,10 @@ func TestMeterProviderOptions(t *testing.T) { // Options provided by WithMeterProviderOptions may be overridden // by configuration, e.g. 
the resource is always defined via // configuration. - // TODO: re-enable this once NewSDK is added - // assert.NotContains(t, buf.String(), "foo") + assert.NotContains(t, buf.String(), "foo") }() - counter, _ := mp.Meter("test").Int64Counter("counter") + counter, _ := sdk.MeterProvider().Meter("test").Int64Counter("counter") counter.Add(t.Context(), 1) } @@ -229,11 +223,10 @@ func TestReader(t *testing.T) { Pull: &PullMetricReader{ Exporter: PullMetricExporter{ PrometheusDevelopment: &ExperimentalPrometheusMetricExporter{ - Host: ptr("localhost"), - Port: ptr(0), - WithoutScopeInfo: ptr(true), - // WithoutUnits: ptr(true), - // WithoutTypeSuffix: ptr(true), + Host: ptr("localhost"), + Port: ptr(0), + WithoutScopeInfo: ptr(true), + TranslationStrategy: ptr(ExperimentalPrometheusMetricExporterTranslationStrategyUnderscoreEscapingWithoutSuffixes), WithResourceConstantLabels: &IncludeExclude{ Included: []string{"include"}, Excluded: []string{"exclude"}, @@ -244,6 +237,26 @@ func TestReader(t *testing.T) { }, wantReader: readerWithServer{promExporter, nil}, }, + { + name: "pull/prometheus/invalid strategy", + reader: MetricReader{ + Pull: &PullMetricReader{ + Exporter: PullMetricExporter{ + PrometheusDevelopment: &ExperimentalPrometheusMetricExporter{ + Host: ptr("localhost"), + Port: ptr(0), + WithoutScopeInfo: ptr(true), + TranslationStrategy: ptr(ExperimentalPrometheusMetricExporterTranslationStrategy("invalid-strategy")), + WithResourceConstantLabels: &IncludeExclude{ + Included: []string{"include"}, + Excluded: []string{"exclude"}, + }, + }, + }, + }, + }, + wantErrT: newErrInvalid("translation strategy invalid"), + }, { name: "periodic/otlp-grpc-exporter", reader: MetricReader{ @@ -1357,22 +1370,20 @@ func TestPrometheusReaderOpts(t *testing.T) { { name: "all set", cfg: ExperimentalPrometheusMetricExporter{ - WithoutScopeInfo: ptr(true), - // WithoutTypeSuffix: ptr(true), - // WithoutUnits: ptr(true), + WithoutScopeInfo: ptr(true), + TranslationStrategy: 
ptr(ExperimentalPrometheusMetricExporterTranslationStrategyUnderscoreEscapingWithoutSuffixes), WithResourceConstantLabels: &IncludeExclude{}, }, - wantOptions: 2, + wantOptions: 3, }, { name: "all set false", cfg: ExperimentalPrometheusMetricExporter{ - WithoutScopeInfo: ptr(false), - // WithoutTypeSuffix: ptr(false), - // WithoutUnits: ptr(false), + WithoutScopeInfo: ptr(false), + TranslationStrategy: ptr(ExperimentalPrometheusMetricExporterTranslationStrategyUnderscoreEscapingWithSuffixes), WithResourceConstantLabels: &IncludeExclude{}, }, - wantOptions: 1, + wantOptions: 2, }, } for _, tt := range testCases { @@ -1402,11 +1413,10 @@ func TestPrometheusIPv6(t *testing.T) { t.Run(tt.name, func(t *testing.T) { port := 0 cfg := ExperimentalPrometheusMetricExporter{ - Host: &tt.host, - Port: &port, - WithoutScopeInfo: ptr(true), - // WithoutTypeSuffix: ptr(true), - // WithoutUnits: ptr(true), + Host: &tt.host, + Port: &port, + WithoutScopeInfo: ptr(true), + TranslationStrategy: ptr(ExperimentalPrometheusMetricExporterTranslationStrategyUnderscoreEscapingWithSuffixes), WithResourceConstantLabels: &IncludeExclude{}, } diff --git a/otelconf/testdata/v1.0.0.json b/otelconf/testdata/v1.0.0.json index f85579dd4b3..fe6a130b3f4 100644 --- a/otelconf/testdata/v1.0.0.json +++ b/otelconf/testdata/v1.0.0.json @@ -515,21 +515,7 @@ "excluded": [ "process.command_args" ] - }, - "detectors": [ - { - "container": null - }, - { - "host": null - }, - { - "process": null - }, - { - "service": null - } - ] + } }, "schema_url": "https://opentelemetry.io/schemas/1.16.0" }, diff --git a/otelconf/testdata/v1.0.0.yaml b/otelconf/testdata/v1.0.0.yaml index 2447b8a71e9..2ebbfd0ee38 100644 --- a/otelconf/testdata/v1.0.0.yaml +++ b/otelconf/testdata/v1.0.0.yaml @@ -841,15 +841,16 @@ resource: # Configure resource detectors. # Resource detector names are dependent on the SDK language ecosystem. Please consult documentation for each respective language. 
# If omitted or null, no resource detectors are enabled. - detectors: - - # Enable the container resource detector, which populates container.* attributes. - container: - - # Enable the host resource detector, which populates host.* and os.* attributes. - host: - - # Enable the process resource detector, which populates process.* attributes. - process: - - # Enable the service detector, which populates service.name based on the OTEL_SERVICE_NAME environment variable and service.instance.id. - service: + # TODO: implement resource detectors + # detectors: + # - # Enable the container resource detector, which populates container.* attributes. + # container: + # - # Enable the host resource detector, which populates host.* and os.* attributes. + # host: + # - # Enable the process resource detector, which populates process.* attributes. + # process: + # - # Enable the service detector, which populates service.name based on the OTEL_SERVICE_NAME environment variable and service.instance.id. + # service: # Configure resource schema URL. # If omitted or null, no schema URL is used. 
schema_url: https://opentelemetry.io/schemas/1.16.0 diff --git a/otelconf/testdata/v1.0.0_env_var.yaml b/otelconf/testdata/v1.0.0_env_var.yaml index f3c24f83e9e..2f520a2c6db 100644 --- a/otelconf/testdata/v1.0.0_env_var.yaml +++ b/otelconf/testdata/v1.0.0_env_var.yaml @@ -1,3 +1,4 @@ +file_format: "1.0" disabled: ${OTEL_SDK_DISABLED} attribute_limits: attribute_value_length_limit: ${OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT} diff --git a/otelconf/trace.go b/otelconf/trace.go index 54638a742be..c7e57d2867c 100644 --- a/otelconf/trace.go +++ b/otelconf/trace.go @@ -30,7 +30,7 @@ func tracerProvider(cfg configOptions, res *resource.Resource) (trace.TracerProv } provider, ok := cfg.opentelemetryConfig.TracerProvider.(*TracerProviderJson) if !ok { - return noop.NewTracerProvider(), noopShutdown, newErrInvalid("invalid tracer provider") + return noop.NewTracerProvider(), noopShutdown, newErrInvalid("tracer_provider") } opts := append(cfg.tracerProviderOptions, sdktrace.WithResource(res)) diff --git a/otelconf/trace_test.go b/otelconf/trace_test.go index 760a5d2fe19..5ebd34a76c9 100644 --- a/otelconf/trace_test.go +++ b/otelconf/trace_test.go @@ -157,26 +157,20 @@ func TestTracerProviderOptions(t *testing.T) { require.NoError(t, err) res := resource.NewSchemaless(attribute.String("foo", "bar")) - // TODO: re-enable this once NewSDK is added - // sdk, err := NewSDK( - // WithOpenTelemetryConfiguration(cfg), - // WithTracerProviderOptions(sdktrace.WithSyncer(stdouttraceExporter)), - // WithTracerProviderOptions(sdktrace.WithResource(res)), - // ) - tp, shutdown, err := tracerProvider(configOptions{ - ctx: t.Context(), - opentelemetryConfig: cfg, - tracerProviderOptions: []sdktrace.TracerProviderOption{sdktrace.WithSyncer(stdouttraceExporter)}, - }, res) + sdk, err := NewSDK( + WithOpenTelemetryConfiguration(cfg), + WithTracerProviderOptions(sdktrace.WithSyncer(stdouttraceExporter)), + WithTracerProviderOptions(sdktrace.WithResource(res)), + ) require.NoError(t, err) defer func() { 
- assert.NoError(t, shutdown(t.Context())) + assert.NoError(t, sdk.Shutdown(t.Context())) }() // The exporter, which we passed in as an extra option to NewSDK, // should be wired up to the provider in addition to the // configuration-based OTLP exporter. - tracer := tp.Tracer("test") + tracer := sdk.TracerProvider().Tracer("test") _, span := tracer.Start(t.Context(), "span") span.End() assert.NotZero(t, buf) @@ -184,8 +178,7 @@ func TestTracerProviderOptions(t *testing.T) { // Options provided by WithMeterProviderOptions may be overridden // by configuration, e.g. the resource is always defined via // configuration. - // TODO: re-enable this once NewSDK is added - // assert.NotContains(t, buf.String(), "foo") + assert.NotContains(t, buf.String(), "foo") } func TestSpanProcessor(t *testing.T) { diff --git a/otelconf/x/generated_config.go b/otelconf/x/generated_config.go new file mode 100644 index 00000000000..f68c46411f7 --- /dev/null +++ b/otelconf/x/generated_config.go @@ -0,0 +1,986 @@ +// Code generated by github.com/atombender/go-jsonschema, DO NOT EDIT. + +package x + +type Aggregation struct { + // Base2ExponentialBucketHistogram corresponds to the JSON schema field + // "base2_exponential_bucket_histogram". + Base2ExponentialBucketHistogram *Base2ExponentialBucketHistogramAggregation `json:"base2_exponential_bucket_histogram,omitempty" yaml:"base2_exponential_bucket_histogram,omitempty" mapstructure:"base2_exponential_bucket_histogram,omitempty"` + + // Default corresponds to the JSON schema field "default". + Default DefaultAggregation `json:"default,omitempty" yaml:"default,omitempty" mapstructure:"default,omitempty"` + + // Drop corresponds to the JSON schema field "drop". + Drop DropAggregation `json:"drop,omitempty" yaml:"drop,omitempty" mapstructure:"drop,omitempty"` + + // ExplicitBucketHistogram corresponds to the JSON schema field + // "explicit_bucket_histogram". 
+ ExplicitBucketHistogram *ExplicitBucketHistogramAggregation `json:"explicit_bucket_histogram,omitempty" yaml:"explicit_bucket_histogram,omitempty" mapstructure:"explicit_bucket_histogram,omitempty"` + + // LastValue corresponds to the JSON schema field "last_value". + LastValue LastValueAggregation `json:"last_value,omitempty" yaml:"last_value,omitempty" mapstructure:"last_value,omitempty"` + + // Sum corresponds to the JSON schema field "sum". + Sum SumAggregation `json:"sum,omitempty" yaml:"sum,omitempty" mapstructure:"sum,omitempty"` +} + +type AlwaysOffSampler map[string]interface{} + +type AlwaysOnSampler map[string]interface{} + +type AttributeLimits struct { + // AttributeCountLimit corresponds to the JSON schema field + // "attribute_count_limit". + AttributeCountLimit *int `json:"attribute_count_limit,omitempty" yaml:"attribute_count_limit,omitempty" mapstructure:"attribute_count_limit,omitempty"` + + // AttributeValueLengthLimit corresponds to the JSON schema field + // "attribute_value_length_limit". + AttributeValueLengthLimit *int `json:"attribute_value_length_limit,omitempty" yaml:"attribute_value_length_limit,omitempty" mapstructure:"attribute_value_length_limit,omitempty"` + + AdditionalProperties interface{} `mapstructure:",remain"` +} + +type AttributeNameValue struct { + // Name corresponds to the JSON schema field "name". + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Type corresponds to the JSON schema field "type". + Type *AttributeType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` + + // Value corresponds to the JSON schema field "value". 
+ Value interface{} `json:"value" yaml:"value" mapstructure:"value"` +} + +type AttributeType struct { + Value interface{} +} + +type B3MultiPropagator map[string]interface{} + +type B3Propagator map[string]interface{} + +type BaggagePropagator map[string]interface{} + +type Base2ExponentialBucketHistogramAggregation struct { + // MaxScale corresponds to the JSON schema field "max_scale". + MaxScale *int `json:"max_scale,omitempty" yaml:"max_scale,omitempty" mapstructure:"max_scale,omitempty"` + + // MaxSize corresponds to the JSON schema field "max_size". + MaxSize *int `json:"max_size,omitempty" yaml:"max_size,omitempty" mapstructure:"max_size,omitempty"` + + // RecordMinMax corresponds to the JSON schema field "record_min_max". + RecordMinMax *bool `json:"record_min_max,omitempty" yaml:"record_min_max,omitempty" mapstructure:"record_min_max,omitempty"` +} + +type BatchLogRecordProcessor struct { + // ExportTimeout corresponds to the JSON schema field "export_timeout". + ExportTimeout *int `json:"export_timeout,omitempty" yaml:"export_timeout,omitempty" mapstructure:"export_timeout,omitempty"` + + // Exporter corresponds to the JSON schema field "exporter". + Exporter LogRecordExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` + + // MaxExportBatchSize corresponds to the JSON schema field + // "max_export_batch_size". + MaxExportBatchSize *int `json:"max_export_batch_size,omitempty" yaml:"max_export_batch_size,omitempty" mapstructure:"max_export_batch_size,omitempty"` + + // MaxQueueSize corresponds to the JSON schema field "max_queue_size". + MaxQueueSize *int `json:"max_queue_size,omitempty" yaml:"max_queue_size,omitempty" mapstructure:"max_queue_size,omitempty"` + + // ScheduleDelay corresponds to the JSON schema field "schedule_delay". 
+	ScheduleDelay *int `json:"schedule_delay,omitempty" yaml:"schedule_delay,omitempty" mapstructure:"schedule_delay,omitempty"`
+}
+
+type BatchSpanProcessor struct {
+	// ExportTimeout corresponds to the JSON schema field "export_timeout".
+	ExportTimeout *int `json:"export_timeout,omitempty" yaml:"export_timeout,omitempty" mapstructure:"export_timeout,omitempty"`
+
+	// Exporter corresponds to the JSON schema field "exporter".
+	Exporter SpanExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"`
+
+	// MaxExportBatchSize corresponds to the JSON schema field
+	// "max_export_batch_size".
+	MaxExportBatchSize *int `json:"max_export_batch_size,omitempty" yaml:"max_export_batch_size,omitempty" mapstructure:"max_export_batch_size,omitempty"`
+
+	// MaxQueueSize corresponds to the JSON schema field "max_queue_size".
+	MaxQueueSize *int `json:"max_queue_size,omitempty" yaml:"max_queue_size,omitempty" mapstructure:"max_queue_size,omitempty"`
+
+	// ScheduleDelay corresponds to the JSON schema field "schedule_delay".
+	ScheduleDelay *int `json:"schedule_delay,omitempty" yaml:"schedule_delay,omitempty" mapstructure:"schedule_delay,omitempty"` // NOTE(review): unit not encoded in the type — confirm against schema
+}
+
+type CardinalityLimits struct {
+	// Counter corresponds to the JSON schema field "counter".
+	Counter *int `json:"counter,omitempty" yaml:"counter,omitempty" mapstructure:"counter,omitempty"`
+
+	// Default corresponds to the JSON schema field "default".
+	Default *int `json:"default,omitempty" yaml:"default,omitempty" mapstructure:"default,omitempty"`
+
+	// Gauge corresponds to the JSON schema field "gauge".
+	Gauge *int `json:"gauge,omitempty" yaml:"gauge,omitempty" mapstructure:"gauge,omitempty"`
+
+	// Histogram corresponds to the JSON schema field "histogram".
+	Histogram *int `json:"histogram,omitempty" yaml:"histogram,omitempty" mapstructure:"histogram,omitempty"`
+
+	// ObservableCounter corresponds to the JSON schema field "observable_counter".
+	ObservableCounter *int `json:"observable_counter,omitempty" yaml:"observable_counter,omitempty" mapstructure:"observable_counter,omitempty"`
+
+	// ObservableGauge corresponds to the JSON schema field "observable_gauge".
+	ObservableGauge *int `json:"observable_gauge,omitempty" yaml:"observable_gauge,omitempty" mapstructure:"observable_gauge,omitempty"`
+
+	// ObservableUpDownCounter corresponds to the JSON schema field
+	// "observable_up_down_counter".
+	ObservableUpDownCounter *int `json:"observable_up_down_counter,omitempty" yaml:"observable_up_down_counter,omitempty" mapstructure:"observable_up_down_counter,omitempty"`
+
+	// UpDownCounter corresponds to the JSON schema field "up_down_counter".
+	UpDownCounter *int `json:"up_down_counter,omitempty" yaml:"up_down_counter,omitempty" mapstructure:"up_down_counter,omitempty"`
+}
+
+type ConsoleExporter map[string]interface{} // schema object with no fixed fields; arbitrary keys are accepted
+
+type DefaultAggregation map[string]interface{}
+
+type DropAggregation map[string]interface{}
+
+type ExemplarFilter string
+
+const ExemplarFilterAlwaysOff ExemplarFilter = "always_off"
+const ExemplarFilterAlwaysOn ExemplarFilter = "always_on"
+const ExemplarFilterTraceBased ExemplarFilter = "trace_based"
+
+type ExperimentalContainerResourceDetector map[string]interface{}
+
+type ExperimentalGeneralInstrumentation struct {
+	// Http corresponds to the JSON schema field "http".
+	Http *ExperimentalHttpInstrumentation `json:"http,omitempty" yaml:"http,omitempty" mapstructure:"http,omitempty"`
+
+	// Peer corresponds to the JSON schema field "peer".
+	Peer *ExperimentalPeerInstrumentation `json:"peer,omitempty" yaml:"peer,omitempty" mapstructure:"peer,omitempty"`
+}
+
+type ExperimentalHostResourceDetector map[string]interface{}
+
+type ExperimentalHttpInstrumentation struct {
+	// Client corresponds to the JSON schema field "client".
+	Client *ExperimentalHttpInstrumentationClient `json:"client,omitempty" yaml:"client,omitempty" mapstructure:"client,omitempty"`
+
+	// Server corresponds to the JSON schema field "server".
+	Server *ExperimentalHttpInstrumentationServer `json:"server,omitempty" yaml:"server,omitempty" mapstructure:"server,omitempty"`
+}
+
+type ExperimentalHttpInstrumentationClient struct {
+	// RequestCapturedHeaders corresponds to the JSON schema field
+	// "request_captured_headers".
+	RequestCapturedHeaders []string `json:"request_captured_headers,omitempty" yaml:"request_captured_headers,omitempty" mapstructure:"request_captured_headers,omitempty"`
+
+	// ResponseCapturedHeaders corresponds to the JSON schema field
+	// "response_captured_headers".
+	ResponseCapturedHeaders []string `json:"response_captured_headers,omitempty" yaml:"response_captured_headers,omitempty" mapstructure:"response_captured_headers,omitempty"`
+}
+
+type ExperimentalHttpInstrumentationServer struct {
+	// RequestCapturedHeaders corresponds to the JSON schema field
+	// "request_captured_headers".
+	RequestCapturedHeaders []string `json:"request_captured_headers,omitempty" yaml:"request_captured_headers,omitempty" mapstructure:"request_captured_headers,omitempty"`
+
+	// ResponseCapturedHeaders corresponds to the JSON schema field
+	// "response_captured_headers".
+	ResponseCapturedHeaders []string `json:"response_captured_headers,omitempty" yaml:"response_captured_headers,omitempty" mapstructure:"response_captured_headers,omitempty"`
+}
+
+type ExperimentalLanguageSpecificInstrumentation map[string]interface{}
+
+type ExperimentalLoggerConfig struct {
+	// Disabled corresponds to the JSON schema field "disabled".
+	Disabled *bool `json:"disabled,omitempty" yaml:"disabled,omitempty" mapstructure:"disabled,omitempty"`
+}
+
+type ExperimentalLoggerConfigurator struct {
+	// DefaultConfig corresponds to the JSON schema field "default_config".
+	DefaultConfig *ExperimentalLoggerConfig `json:"default_config,omitempty" yaml:"default_config,omitempty" mapstructure:"default_config,omitempty"`
+
+	// Loggers corresponds to the JSON schema field "loggers".
+	Loggers []ExperimentalLoggerMatcherAndConfig `json:"loggers,omitempty" yaml:"loggers,omitempty" mapstructure:"loggers,omitempty"`
+}
+
+type ExperimentalLoggerMatcherAndConfig struct {
+	// Config corresponds to the JSON schema field "config".
+	Config *ExperimentalLoggerConfig `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"`
+
+	// Name corresponds to the JSON schema field "name".
+	Name *string `json:"name,omitempty" yaml:"name,omitempty" mapstructure:"name,omitempty"`
+}
+
+type ExperimentalMeterConfig struct {
+	// Disabled corresponds to the JSON schema field "disabled".
+	Disabled *bool `json:"disabled,omitempty" yaml:"disabled,omitempty" mapstructure:"disabled,omitempty"`
+}
+
+type ExperimentalMeterConfigurator struct {
+	// DefaultConfig corresponds to the JSON schema field "default_config".
+	DefaultConfig *ExperimentalMeterConfig `json:"default_config,omitempty" yaml:"default_config,omitempty" mapstructure:"default_config,omitempty"`
+
+	// Meters corresponds to the JSON schema field "meters".
+	Meters []ExperimentalMeterMatcherAndConfig `json:"meters,omitempty" yaml:"meters,omitempty" mapstructure:"meters,omitempty"`
+}
+
+type ExperimentalMeterMatcherAndConfig struct {
+	// Config corresponds to the JSON schema field "config".
+	Config *ExperimentalMeterConfig `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"`
+
+	// Name corresponds to the JSON schema field "name".
+	Name *string `json:"name,omitempty" yaml:"name,omitempty" mapstructure:"name,omitempty"`
+}
+
+type ExperimentalOTLPFileExporter struct {
+	// OutputStream corresponds to the JSON schema field "output_stream".
+	OutputStream *string `json:"output_stream,omitempty" yaml:"output_stream,omitempty" mapstructure:"output_stream,omitempty"`
+}
+
+type ExperimentalOTLPFileMetricExporter struct {
+	// DefaultHistogramAggregation corresponds to the JSON schema field
+	// "default_histogram_aggregation".
+	DefaultHistogramAggregation *ExporterDefaultHistogramAggregation `json:"default_histogram_aggregation,omitempty" yaml:"default_histogram_aggregation,omitempty" mapstructure:"default_histogram_aggregation,omitempty"`
+
+	// OutputStream corresponds to the JSON schema field "output_stream".
+	OutputStream *string `json:"output_stream,omitempty" yaml:"output_stream,omitempty" mapstructure:"output_stream,omitempty"`
+
+	// TemporalityPreference corresponds to the JSON schema field
+	// "temporality_preference".
+	TemporalityPreference *ExporterTemporalityPreference `json:"temporality_preference,omitempty" yaml:"temporality_preference,omitempty" mapstructure:"temporality_preference,omitempty"`
+}
+
+type ExperimentalPeerInstrumentation struct {
+	// ServiceMapping corresponds to the JSON schema field "service_mapping".
+	ServiceMapping []ExperimentalPeerInstrumentationServiceMappingElem `json:"service_mapping,omitempty" yaml:"service_mapping,omitempty" mapstructure:"service_mapping,omitempty"`
+}
+
+type ExperimentalPeerInstrumentationServiceMappingElem struct {
+	// Peer corresponds to the JSON schema field "peer".
+	Peer string `json:"peer" yaml:"peer" mapstructure:"peer"`
+
+	// Service corresponds to the JSON schema field "service".
+	Service string `json:"service" yaml:"service" mapstructure:"service"`
+}
+
+type ExperimentalProcessResourceDetector map[string]interface{}
+
+type ExperimentalPrometheusMetricExporter struct {
+	// Host corresponds to the JSON schema field "host".
+	Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"`
+
+	// Port corresponds to the JSON schema field "port".
+	Port *int `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"`
+
+	// TranslationStrategy corresponds to the JSON schema field
+	// "translation_strategy".
+	TranslationStrategy *ExperimentalPrometheusMetricExporterTranslationStrategy `json:"translation_strategy,omitempty" yaml:"translation_strategy,omitempty" mapstructure:"translation_strategy,omitempty"`
+
+	// WithResourceConstantLabels corresponds to the JSON schema field
+	// "with_resource_constant_labels".
+	WithResourceConstantLabels *IncludeExclude `json:"with_resource_constant_labels,omitempty" yaml:"with_resource_constant_labels,omitempty" mapstructure:"with_resource_constant_labels,omitempty"`
+
+	// WithoutScopeInfo corresponds to the JSON schema field "without_scope_info".
+	WithoutScopeInfo *bool `json:"without_scope_info,omitempty" yaml:"without_scope_info,omitempty" mapstructure:"without_scope_info,omitempty"`
+}
+
+type ExperimentalPrometheusMetricExporterTranslationStrategy string
+
+const ExperimentalPrometheusMetricExporterTranslationStrategyNoTranslation ExperimentalPrometheusMetricExporterTranslationStrategy = "NoTranslation"
+const ExperimentalPrometheusMetricExporterTranslationStrategyNoUTF8EscapingWithSuffixes ExperimentalPrometheusMetricExporterTranslationStrategy = "NoUTF8EscapingWithSuffixes"
+const ExperimentalPrometheusMetricExporterTranslationStrategyUnderscoreEscapingWithSuffixes ExperimentalPrometheusMetricExporterTranslationStrategy = "UnderscoreEscapingWithSuffixes"
+const ExperimentalPrometheusMetricExporterTranslationStrategyUnderscoreEscapingWithoutSuffixes ExperimentalPrometheusMetricExporterTranslationStrategy = "UnderscoreEscapingWithoutSuffixes"
+
+type ExperimentalResourceDetection struct {
+	// Attributes corresponds to the JSON schema field "attributes".
+	Attributes *IncludeExclude `json:"attributes,omitempty" yaml:"attributes,omitempty" mapstructure:"attributes,omitempty"`
+
+	// Detectors corresponds to the JSON schema field "detectors".
+	Detectors []ExperimentalResourceDetector `json:"detectors,omitempty" yaml:"detectors,omitempty" mapstructure:"detectors,omitempty"`
+}
+
+type ExperimentalResourceDetector struct {
+	// Container corresponds to the JSON schema field "container".
+	Container ExperimentalContainerResourceDetector `json:"container,omitempty" yaml:"container,omitempty" mapstructure:"container,omitempty"`
+
+	// Host corresponds to the JSON schema field "host".
+	Host ExperimentalHostResourceDetector `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"`
+
+	// Process corresponds to the JSON schema field "process".
+	Process ExperimentalProcessResourceDetector `json:"process,omitempty" yaml:"process,omitempty" mapstructure:"process,omitempty"`
+
+	// Service corresponds to the JSON schema field "service".
+	Service ExperimentalServiceResourceDetector `json:"service,omitempty" yaml:"service,omitempty" mapstructure:"service,omitempty"`
+
+	AdditionalProperties interface{} `mapstructure:",remain"` // collects keys not matched by the fields above during mapstructure decoding
+}
+
+type ExperimentalServiceResourceDetector map[string]interface{}
+
+type ExperimentalTracerConfig struct {
+	// Disabled corresponds to the JSON schema field "disabled".
+	Disabled *bool `json:"disabled,omitempty" yaml:"disabled,omitempty" mapstructure:"disabled,omitempty"`
+}
+
+type ExperimentalTracerConfigurator struct {
+	// DefaultConfig corresponds to the JSON schema field "default_config".
+	DefaultConfig *ExperimentalTracerConfig `json:"default_config,omitempty" yaml:"default_config,omitempty" mapstructure:"default_config,omitempty"`
+
+	// Tracers corresponds to the JSON schema field "tracers".
+	Tracers []ExperimentalTracerMatcherAndConfig `json:"tracers,omitempty" yaml:"tracers,omitempty" mapstructure:"tracers,omitempty"`
+}
+
+type ExperimentalTracerMatcherAndConfig struct {
+	// Config corresponds to the JSON schema field "config".
+	Config *ExperimentalTracerConfig `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"`
+
+	// Name corresponds to the JSON schema field "name".
+	Name *string `json:"name,omitempty" yaml:"name,omitempty" mapstructure:"name,omitempty"`
+}
+
+type ExplicitBucketHistogramAggregation struct {
+	// Boundaries corresponds to the JSON schema field "boundaries".
+	Boundaries []float64 `json:"boundaries,omitempty" yaml:"boundaries,omitempty" mapstructure:"boundaries,omitempty"`
+
+	// RecordMinMax corresponds to the JSON schema field "record_min_max".
+	RecordMinMax *bool `json:"record_min_max,omitempty" yaml:"record_min_max,omitempty" mapstructure:"record_min_max,omitempty"`
+}
+
+type ExporterDefaultHistogramAggregation string
+
+const ExporterDefaultHistogramAggregationBase2ExponentialBucketHistogram ExporterDefaultHistogramAggregation = "base2_exponential_bucket_histogram"
+const ExporterDefaultHistogramAggregationExplicitBucketHistogram ExporterDefaultHistogramAggregation = "explicit_bucket_histogram"
+
+type ExporterTemporalityPreference string
+
+const ExporterTemporalityPreferenceCumulative ExporterTemporalityPreference = "cumulative"
+const ExporterTemporalityPreferenceDelta ExporterTemporalityPreference = "delta"
+const ExporterTemporalityPreferenceLowMemory ExporterTemporalityPreference = "low_memory"
+
+type IncludeExclude struct {
+	// Excluded corresponds to the JSON schema field "excluded".
+	Excluded []string `json:"excluded,omitempty" yaml:"excluded,omitempty" mapstructure:"excluded,omitempty"`
+
+	// Included corresponds to the JSON schema field "included".
+	Included []string `json:"included,omitempty" yaml:"included,omitempty" mapstructure:"included,omitempty"`
+}
+
+type InstrumentType string
+
+const InstrumentTypeCounter InstrumentType = "counter"
+const InstrumentTypeGauge InstrumentType = "gauge"
+const InstrumentTypeHistogram InstrumentType = "histogram"
+const InstrumentTypeObservableCounter InstrumentType = "observable_counter"
+const InstrumentTypeObservableGauge InstrumentType = "observable_gauge"
+const InstrumentTypeObservableUpDownCounter InstrumentType = "observable_up_down_counter"
+const InstrumentTypeUpDownCounter InstrumentType = "up_down_counter"
+
+type InstrumentationJson struct {
+	// Cpp corresponds to the JSON schema field "cpp".
+	Cpp ExperimentalLanguageSpecificInstrumentation `json:"cpp,omitempty" yaml:"cpp,omitempty" mapstructure:"cpp,omitempty"`
+
+	// Dotnet corresponds to the JSON schema field "dotnet".
+	Dotnet ExperimentalLanguageSpecificInstrumentation `json:"dotnet,omitempty" yaml:"dotnet,omitempty" mapstructure:"dotnet,omitempty"`
+
+	// Erlang corresponds to the JSON schema field "erlang".
+	Erlang ExperimentalLanguageSpecificInstrumentation `json:"erlang,omitempty" yaml:"erlang,omitempty" mapstructure:"erlang,omitempty"`
+
+	// General corresponds to the JSON schema field "general".
+	General *ExperimentalGeneralInstrumentation `json:"general,omitempty" yaml:"general,omitempty" mapstructure:"general,omitempty"`
+
+	// Go corresponds to the JSON schema field "go".
+	Go ExperimentalLanguageSpecificInstrumentation `json:"go,omitempty" yaml:"go,omitempty" mapstructure:"go,omitempty"`
+
+	// Java corresponds to the JSON schema field "java".
+	Java ExperimentalLanguageSpecificInstrumentation `json:"java,omitempty" yaml:"java,omitempty" mapstructure:"java,omitempty"`
+
+	// Js corresponds to the JSON schema field "js".
+	Js ExperimentalLanguageSpecificInstrumentation `json:"js,omitempty" yaml:"js,omitempty" mapstructure:"js,omitempty"`
+
+	// Php corresponds to the JSON schema field "php".
+	Php ExperimentalLanguageSpecificInstrumentation `json:"php,omitempty" yaml:"php,omitempty" mapstructure:"php,omitempty"`
+
+	// Python corresponds to the JSON schema field "python".
+	Python ExperimentalLanguageSpecificInstrumentation `json:"python,omitempty" yaml:"python,omitempty" mapstructure:"python,omitempty"`
+
+	// Ruby corresponds to the JSON schema field "ruby".
+	Ruby ExperimentalLanguageSpecificInstrumentation `json:"ruby,omitempty" yaml:"ruby,omitempty" mapstructure:"ruby,omitempty"`
+
+	// Rust corresponds to the JSON schema field "rust".
+	Rust ExperimentalLanguageSpecificInstrumentation `json:"rust,omitempty" yaml:"rust,omitempty" mapstructure:"rust,omitempty"`
+
+	// Swift corresponds to the JSON schema field "swift".
+	Swift ExperimentalLanguageSpecificInstrumentation `json:"swift,omitempty" yaml:"swift,omitempty" mapstructure:"swift,omitempty"`
+}
+
+type JaegerPropagator map[string]interface{}
+
+type JaegerRemoteSampler struct {
+	// Endpoint corresponds to the JSON schema field "endpoint".
+	Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
+
+	// InitialSampler corresponds to the JSON schema field "initial_sampler".
+	InitialSampler *Sampler `json:"initial_sampler,omitempty" yaml:"initial_sampler,omitempty" mapstructure:"initial_sampler,omitempty"`
+
+	// Interval corresponds to the JSON schema field "interval".
+	Interval *int `json:"interval,omitempty" yaml:"interval,omitempty" mapstructure:"interval,omitempty"`
+}
+
+type LastValueAggregation map[string]interface{}
+
+type LogRecordExporter struct {
+	// Console corresponds to the JSON schema field "console".
+	Console ConsoleExporter `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"`
+
+	// OTLPFileDevelopment corresponds to the JSON schema field
+	// "otlp_file/development".
+	OTLPFileDevelopment *ExperimentalOTLPFileExporter `json:"otlp_file/development,omitempty" yaml:"otlp_file/development,omitempty" mapstructure:"otlp_file/development,omitempty"`
+
+	// OTLPGrpc corresponds to the JSON schema field "otlp_grpc".
+	OTLPGrpc *OTLPGrpcExporter `json:"otlp_grpc,omitempty" yaml:"otlp_grpc,omitempty" mapstructure:"otlp_grpc,omitempty"`
+
+	// OTLPHttp corresponds to the JSON schema field "otlp_http".
+	OTLPHttp *OTLPHttpExporter `json:"otlp_http,omitempty" yaml:"otlp_http,omitempty" mapstructure:"otlp_http,omitempty"`
+
+	AdditionalProperties interface{} `mapstructure:",remain"`
+}
+
+type LogRecordLimits struct {
+	// AttributeCountLimit corresponds to the JSON schema field
+	// "attribute_count_limit".
+	AttributeCountLimit *int `json:"attribute_count_limit,omitempty" yaml:"attribute_count_limit,omitempty" mapstructure:"attribute_count_limit,omitempty"`
+
+	// AttributeValueLengthLimit corresponds to the JSON schema field
+	// "attribute_value_length_limit".
+	AttributeValueLengthLimit *int `json:"attribute_value_length_limit,omitempty" yaml:"attribute_value_length_limit,omitempty" mapstructure:"attribute_value_length_limit,omitempty"`
+}
+
+type LogRecordProcessor struct {
+	// Batch corresponds to the JSON schema field "batch".
+	Batch *BatchLogRecordProcessor `json:"batch,omitempty" yaml:"batch,omitempty" mapstructure:"batch,omitempty"`
+
+	// Simple corresponds to the JSON schema field "simple".
+	Simple *SimpleLogRecordProcessor `json:"simple,omitempty" yaml:"simple,omitempty" mapstructure:"simple,omitempty"`
+
+	AdditionalProperties interface{} `mapstructure:",remain"`
+}
+
+type LoggerProviderJson struct {
+	// Limits corresponds to the JSON schema field "limits".
+	Limits *LogRecordLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"`
+
+	// LoggerConfiguratorDevelopment corresponds to the JSON schema field
+	// "logger_configurator/development".
+	LoggerConfiguratorDevelopment *ExperimentalLoggerConfigurator `json:"logger_configurator/development,omitempty" yaml:"logger_configurator/development,omitempty" mapstructure:"logger_configurator/development,omitempty"`
+
+	// Processors corresponds to the JSON schema field "processors".
+	Processors []LogRecordProcessor `json:"processors" yaml:"processors" mapstructure:"processors"`
+}
+
+type MeterProviderJson struct {
+	// ExemplarFilter corresponds to the JSON schema field "exemplar_filter".
+	ExemplarFilter *ExemplarFilter `json:"exemplar_filter,omitempty" yaml:"exemplar_filter,omitempty" mapstructure:"exemplar_filter,omitempty"`
+
+	// MeterConfiguratorDevelopment corresponds to the JSON schema field
+	// "meter_configurator/development".
+	MeterConfiguratorDevelopment *ExperimentalMeterConfigurator `json:"meter_configurator/development,omitempty" yaml:"meter_configurator/development,omitempty" mapstructure:"meter_configurator/development,omitempty"`
+
+	// Readers corresponds to the JSON schema field "readers".
+	Readers []MetricReader `json:"readers" yaml:"readers" mapstructure:"readers"`
+
+	// Views corresponds to the JSON schema field "views".
+	Views []View `json:"views,omitempty" yaml:"views,omitempty" mapstructure:"views,omitempty"`
+}
+
+type MetricProducer struct {
+	// Opencensus corresponds to the JSON schema field "opencensus".
+	Opencensus OpenCensusMetricProducer `json:"opencensus,omitempty" yaml:"opencensus,omitempty" mapstructure:"opencensus,omitempty"`
+
+	AdditionalProperties interface{} `mapstructure:",remain"`
+}
+
+type MetricReader struct {
+	// Periodic corresponds to the JSON schema field "periodic".
+	Periodic *PeriodicMetricReader `json:"periodic,omitempty" yaml:"periodic,omitempty" mapstructure:"periodic,omitempty"`
+
+	// Pull corresponds to the JSON schema field "pull".
+	Pull *PullMetricReader `json:"pull,omitempty" yaml:"pull,omitempty" mapstructure:"pull,omitempty"`
+}
+
+type NameStringValuePair struct {
+	// Name corresponds to the JSON schema field "name".
+	Name string `json:"name" yaml:"name" mapstructure:"name"`
+
+	// Value corresponds to the JSON schema field "value".
+	Value *string `json:"value" yaml:"value" mapstructure:"value"` // pointer with no omitempty: a nil value marshals to JSON null
+}
+
+type OTLPGrpcExporter struct {
+	// CertificateFile corresponds to the JSON schema field "certificate_file".
+	CertificateFile *string `json:"certificate_file,omitempty" yaml:"certificate_file,omitempty" mapstructure:"certificate_file,omitempty"`
+
+	// ClientCertificateFile corresponds to the JSON schema field
+	// "client_certificate_file".
+	ClientCertificateFile *string `json:"client_certificate_file,omitempty" yaml:"client_certificate_file,omitempty" mapstructure:"client_certificate_file,omitempty"`
+
+	// ClientKeyFile corresponds to the JSON schema field "client_key_file".
+	ClientKeyFile *string `json:"client_key_file,omitempty" yaml:"client_key_file,omitempty" mapstructure:"client_key_file,omitempty"`
+
+	// Compression corresponds to the JSON schema field "compression".
+	Compression *string `json:"compression,omitempty" yaml:"compression,omitempty" mapstructure:"compression,omitempty"`
+
+	// Endpoint corresponds to the JSON schema field "endpoint".
+	Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
+
+	// Headers corresponds to the JSON schema field "headers".
+	Headers []NameStringValuePair `json:"headers,omitempty" yaml:"headers,omitempty" mapstructure:"headers,omitempty"`
+
+	// HeadersList corresponds to the JSON schema field "headers_list".
+	HeadersList *string `json:"headers_list,omitempty" yaml:"headers_list,omitempty" mapstructure:"headers_list,omitempty"`
+
+	// Insecure corresponds to the JSON schema field "insecure".
+	Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`
+
+	// Timeout corresponds to the JSON schema field "timeout".
+	Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` // NOTE(review): unit not encoded in the type — confirm against schema
+}
+
+type OTLPGrpcMetricExporter struct {
+	// CertificateFile corresponds to the JSON schema field "certificate_file".
+	CertificateFile *string `json:"certificate_file,omitempty" yaml:"certificate_file,omitempty" mapstructure:"certificate_file,omitempty"`
+
+	// ClientCertificateFile corresponds to the JSON schema field
+	// "client_certificate_file".
+	ClientCertificateFile *string `json:"client_certificate_file,omitempty" yaml:"client_certificate_file,omitempty" mapstructure:"client_certificate_file,omitempty"`
+
+	// ClientKeyFile corresponds to the JSON schema field "client_key_file".
+	ClientKeyFile *string `json:"client_key_file,omitempty" yaml:"client_key_file,omitempty" mapstructure:"client_key_file,omitempty"`
+
+	// Compression corresponds to the JSON schema field "compression".
+	Compression *string `json:"compression,omitempty" yaml:"compression,omitempty" mapstructure:"compression,omitempty"`
+
+	// DefaultHistogramAggregation corresponds to the JSON schema field
+	// "default_histogram_aggregation".
+	DefaultHistogramAggregation *ExporterDefaultHistogramAggregation `json:"default_histogram_aggregation,omitempty" yaml:"default_histogram_aggregation,omitempty" mapstructure:"default_histogram_aggregation,omitempty"`
+
+	// Endpoint corresponds to the JSON schema field "endpoint".
+	Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
+
+	// Headers corresponds to the JSON schema field "headers".
+	Headers []NameStringValuePair `json:"headers,omitempty" yaml:"headers,omitempty" mapstructure:"headers,omitempty"`
+
+	// HeadersList corresponds to the JSON schema field "headers_list".
+	HeadersList *string `json:"headers_list,omitempty" yaml:"headers_list,omitempty" mapstructure:"headers_list,omitempty"`
+
+	// Insecure corresponds to the JSON schema field "insecure".
+	Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`
+
+	// TemporalityPreference corresponds to the JSON schema field
+	// "temporality_preference".
+	TemporalityPreference *ExporterTemporalityPreference `json:"temporality_preference,omitempty" yaml:"temporality_preference,omitempty" mapstructure:"temporality_preference,omitempty"`
+
+	// Timeout corresponds to the JSON schema field "timeout".
+	Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"`
+}
+
+type OTLPHttpEncoding string
+
+const OTLPHttpEncodingJson OTLPHttpEncoding = "json"
+const OTLPHttpEncodingProtobuf OTLPHttpEncoding = "protobuf"
+
+type OTLPHttpExporter struct {
+	// CertificateFile corresponds to the JSON schema field "certificate_file".
+	CertificateFile *string `json:"certificate_file,omitempty" yaml:"certificate_file,omitempty" mapstructure:"certificate_file,omitempty"`
+
+	// ClientCertificateFile corresponds to the JSON schema field
+	// "client_certificate_file".
+	ClientCertificateFile *string `json:"client_certificate_file,omitempty" yaml:"client_certificate_file,omitempty" mapstructure:"client_certificate_file,omitempty"`
+
+	// ClientKeyFile corresponds to the JSON schema field "client_key_file".
+	ClientKeyFile *string `json:"client_key_file,omitempty" yaml:"client_key_file,omitempty" mapstructure:"client_key_file,omitempty"`
+
+	// Compression corresponds to the JSON schema field "compression".
+	Compression *string `json:"compression,omitempty" yaml:"compression,omitempty" mapstructure:"compression,omitempty"`
+
+	// Encoding corresponds to the JSON schema field "encoding".
+	Encoding *OTLPHttpEncoding `json:"encoding,omitempty" yaml:"encoding,omitempty" mapstructure:"encoding,omitempty"`
+
+	// Endpoint corresponds to the JSON schema field "endpoint".
+	Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
+
+	// Headers corresponds to the JSON schema field "headers".
+	Headers []NameStringValuePair `json:"headers,omitempty" yaml:"headers,omitempty" mapstructure:"headers,omitempty"`
+
+	// HeadersList corresponds to the JSON schema field "headers_list".
+	HeadersList *string `json:"headers_list,omitempty" yaml:"headers_list,omitempty" mapstructure:"headers_list,omitempty"`
+
+	// Timeout corresponds to the JSON schema field "timeout".
+	Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"`
+}
+
+type OTLPHttpMetricExporter struct {
+	// CertificateFile corresponds to the JSON schema field "certificate_file".
+	CertificateFile *string `json:"certificate_file,omitempty" yaml:"certificate_file,omitempty" mapstructure:"certificate_file,omitempty"`
+
+	// ClientCertificateFile corresponds to the JSON schema field
+	// "client_certificate_file".
+	ClientCertificateFile *string `json:"client_certificate_file,omitempty" yaml:"client_certificate_file,omitempty" mapstructure:"client_certificate_file,omitempty"`
+
+	// ClientKeyFile corresponds to the JSON schema field "client_key_file".
+	ClientKeyFile *string `json:"client_key_file,omitempty" yaml:"client_key_file,omitempty" mapstructure:"client_key_file,omitempty"`
+
+	// Compression corresponds to the JSON schema field "compression".
+	Compression *string `json:"compression,omitempty" yaml:"compression,omitempty" mapstructure:"compression,omitempty"`
+
+	// DefaultHistogramAggregation corresponds to the JSON schema field
+	// "default_histogram_aggregation".
+	DefaultHistogramAggregation *ExporterDefaultHistogramAggregation `json:"default_histogram_aggregation,omitempty" yaml:"default_histogram_aggregation,omitempty" mapstructure:"default_histogram_aggregation,omitempty"`
+
+	// Encoding corresponds to the JSON schema field "encoding".
+	Encoding *OTLPHttpEncoding `json:"encoding,omitempty" yaml:"encoding,omitempty" mapstructure:"encoding,omitempty"`
+
+	// Endpoint corresponds to the JSON schema field "endpoint".
+	Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
+
+	// Headers corresponds to the JSON schema field "headers".
+	Headers []NameStringValuePair `json:"headers,omitempty" yaml:"headers,omitempty" mapstructure:"headers,omitempty"`
+
+	// HeadersList corresponds to the JSON schema field "headers_list".
+	HeadersList *string `json:"headers_list,omitempty" yaml:"headers_list,omitempty" mapstructure:"headers_list,omitempty"`
+
+	// TemporalityPreference corresponds to the JSON schema field
+	// "temporality_preference".
+	TemporalityPreference *ExporterTemporalityPreference `json:"temporality_preference,omitempty" yaml:"temporality_preference,omitempty" mapstructure:"temporality_preference,omitempty"`
+
+	// Timeout corresponds to the JSON schema field "timeout".
+	Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"`
+}
+
+type OpenCensusMetricProducer map[string]interface{}
+
+type OpenTelemetryConfiguration struct {
+	// AttributeLimits corresponds to the JSON schema field "attribute_limits".
+	AttributeLimits *AttributeLimits `json:"attribute_limits,omitempty" yaml:"attribute_limits,omitempty" mapstructure:"attribute_limits,omitempty"`
+
+	// Disabled corresponds to the JSON schema field "disabled".
+	Disabled *bool `json:"disabled,omitempty" yaml:"disabled,omitempty" mapstructure:"disabled,omitempty"`
+
+	// FileFormat corresponds to the JSON schema field "file_format".
+	FileFormat string `json:"file_format" yaml:"file_format" mapstructure:"file_format"`
+
+	// InstrumentationDevelopment corresponds to the JSON schema field
+	// "instrumentation/development".
+	InstrumentationDevelopment OpenTelemetryConfigurationInstrumentationDevelopment `json:"instrumentation/development,omitempty" yaml:"instrumentation/development,omitempty" mapstructure:"instrumentation/development,omitempty"`
+
+	// LogLevel corresponds to the JSON schema field "log_level".
+	LogLevel *string `json:"log_level,omitempty" yaml:"log_level,omitempty" mapstructure:"log_level,omitempty"`
+
+	// LoggerProvider corresponds to the JSON schema field "logger_provider".
+	LoggerProvider OpenTelemetryConfigurationLoggerProvider `json:"logger_provider,omitempty" yaml:"logger_provider,omitempty" mapstructure:"logger_provider,omitempty"`
+
+	// MeterProvider corresponds to the JSON schema field "meter_provider".
+	MeterProvider OpenTelemetryConfigurationMeterProvider `json:"meter_provider,omitempty" yaml:"meter_provider,omitempty" mapstructure:"meter_provider,omitempty"`
+
+	// Propagator corresponds to the JSON schema field "propagator".
+	Propagator OpenTelemetryConfigurationPropagator `json:"propagator,omitempty" yaml:"propagator,omitempty" mapstructure:"propagator,omitempty"`
+
+	// Resource corresponds to the JSON schema field "resource".
+	Resource OpenTelemetryConfigurationResource `json:"resource,omitempty" yaml:"resource,omitempty" mapstructure:"resource,omitempty"`
+
+	// TracerProvider corresponds to the JSON schema field "tracer_provider".
+	TracerProvider OpenTelemetryConfigurationTracerProvider `json:"tracer_provider,omitempty" yaml:"tracer_provider,omitempty" mapstructure:"tracer_provider,omitempty"`
+
+	AdditionalProperties interface{} `mapstructure:",remain"` // collects keys not matched by the fields above during mapstructure decoding
+}
+
+type OpenTelemetryConfigurationInstrumentationDevelopment interface{}
+
+type OpenTelemetryConfigurationLoggerProvider interface{}
+
+type OpenTelemetryConfigurationMeterProvider interface{}
+
+type OpenTelemetryConfigurationPropagator interface{}
+
+type OpenTelemetryConfigurationResource interface{}
+
+type OpenTelemetryConfigurationTracerProvider interface{}
+
+type OpenTracingPropagator map[string]interface{}
+
+type ParentBasedSampler struct {
+	// LocalParentNotSampled corresponds to the JSON schema field
+	// "local_parent_not_sampled".
+	LocalParentNotSampled *Sampler `json:"local_parent_not_sampled,omitempty" yaml:"local_parent_not_sampled,omitempty" mapstructure:"local_parent_not_sampled,omitempty"`
+
+	// LocalParentSampled corresponds to the JSON schema field "local_parent_sampled".
+	LocalParentSampled *Sampler `json:"local_parent_sampled,omitempty" yaml:"local_parent_sampled,omitempty" mapstructure:"local_parent_sampled,omitempty"`
+
+	// RemoteParentNotSampled corresponds to the JSON schema field
+	// "remote_parent_not_sampled".
+	RemoteParentNotSampled *Sampler `json:"remote_parent_not_sampled,omitempty" yaml:"remote_parent_not_sampled,omitempty" mapstructure:"remote_parent_not_sampled,omitempty"`
+
+	// RemoteParentSampled corresponds to the JSON schema field
+	// "remote_parent_sampled".
+	RemoteParentSampled *Sampler `json:"remote_parent_sampled,omitempty" yaml:"remote_parent_sampled,omitempty" mapstructure:"remote_parent_sampled,omitempty"`
+
+	// Root corresponds to the JSON schema field "root".
+	Root *Sampler `json:"root,omitempty" yaml:"root,omitempty" mapstructure:"root,omitempty"`
+}
+
+type PeriodicMetricReader struct {
+	// CardinalityLimits corresponds to the JSON schema field "cardinality_limits".
+	CardinalityLimits *CardinalityLimits `json:"cardinality_limits,omitempty" yaml:"cardinality_limits,omitempty" mapstructure:"cardinality_limits,omitempty"`
+
+	// Exporter corresponds to the JSON schema field "exporter".
+	Exporter PushMetricExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"`
+
+	// Interval corresponds to the JSON schema field "interval".
+	Interval *int `json:"interval,omitempty" yaml:"interval,omitempty" mapstructure:"interval,omitempty"`
+
+	// Producers corresponds to the JSON schema field "producers".
+	Producers []MetricProducer `json:"producers,omitempty" yaml:"producers,omitempty" mapstructure:"producers,omitempty"`
+
+	// Timeout corresponds to the JSON schema field "timeout".
+	Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"`
+}
+
+type PropagatorJson struct {
+	// Composite corresponds to the JSON schema field "composite".
+	Composite []TextMapPropagator `json:"composite,omitempty" yaml:"composite,omitempty" mapstructure:"composite,omitempty"`
+
+	// CompositeList corresponds to the JSON schema field "composite_list".
+	CompositeList *string `json:"composite_list,omitempty" yaml:"composite_list,omitempty" mapstructure:"composite_list,omitempty"`
+}
+
+type PullMetricExporter struct {
+	// PrometheusDevelopment corresponds to the JSON schema field
+	// "prometheus/development".
+	PrometheusDevelopment *ExperimentalPrometheusMetricExporter `json:"prometheus/development,omitempty" yaml:"prometheus/development,omitempty" mapstructure:"prometheus/development,omitempty"`
+
+	AdditionalProperties interface{} `mapstructure:",remain"`
+}
+
+type PullMetricReader struct {
+	// CardinalityLimits corresponds to the JSON schema field "cardinality_limits".
+	CardinalityLimits *CardinalityLimits `json:"cardinality_limits,omitempty" yaml:"cardinality_limits,omitempty" mapstructure:"cardinality_limits,omitempty"`
+
+	// Exporter corresponds to the JSON schema field "exporter".
+	Exporter PullMetricExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"`
+
+	// Producers corresponds to the JSON schema field "producers".
+	Producers []MetricProducer `json:"producers,omitempty" yaml:"producers,omitempty" mapstructure:"producers,omitempty"`
+}
+
+type PushMetricExporter struct {
+	// Console corresponds to the JSON schema field "console".
+	Console ConsoleExporter `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"`
+
+	// OTLPFileDevelopment corresponds to the JSON schema field
+	// "otlp_file/development".
+	OTLPFileDevelopment *ExperimentalOTLPFileMetricExporter `json:"otlp_file/development,omitempty" yaml:"otlp_file/development,omitempty" mapstructure:"otlp_file/development,omitempty"`
+
+	// OTLPGrpc corresponds to the JSON schema field "otlp_grpc".
+	OTLPGrpc *OTLPGrpcMetricExporter `json:"otlp_grpc,omitempty" yaml:"otlp_grpc,omitempty" mapstructure:"otlp_grpc,omitempty"`
+
+	// OTLPHttp corresponds to the JSON schema field "otlp_http".
+	OTLPHttp *OTLPHttpMetricExporter `json:"otlp_http,omitempty" yaml:"otlp_http,omitempty" mapstructure:"otlp_http,omitempty"`
+
+	AdditionalProperties interface{} `mapstructure:",remain"` // collects keys not matched by the fields above during mapstructure decoding
+}
+
+type ResourceJson struct {
+	// Attributes corresponds to the JSON schema field "attributes".
+	Attributes []AttributeNameValue `json:"attributes,omitempty" yaml:"attributes,omitempty" mapstructure:"attributes,omitempty"`
+
+	// AttributesList corresponds to the JSON schema field "attributes_list".
+	AttributesList *string `json:"attributes_list,omitempty" yaml:"attributes_list,omitempty" mapstructure:"attributes_list,omitempty"`
+
+	// DetectionDevelopment corresponds to the JSON schema field
+	// "detection/development".
+	DetectionDevelopment *ExperimentalResourceDetection `json:"detection/development,omitempty" yaml:"detection/development,omitempty" mapstructure:"detection/development,omitempty"`
+
+	// SchemaUrl corresponds to the JSON schema field "schema_url".
+ SchemaUrl *string `json:"schema_url,omitempty" yaml:"schema_url,omitempty" mapstructure:"schema_url,omitempty"` +} + +type Sampler struct { + // AlwaysOff corresponds to the JSON schema field "always_off". + AlwaysOff AlwaysOffSampler `json:"always_off,omitempty" yaml:"always_off,omitempty" mapstructure:"always_off,omitempty"` + + // AlwaysOn corresponds to the JSON schema field "always_on". + AlwaysOn AlwaysOnSampler `json:"always_on,omitempty" yaml:"always_on,omitempty" mapstructure:"always_on,omitempty"` + + // JaegerRemote corresponds to the JSON schema field "jaeger_remote". + JaegerRemote *JaegerRemoteSampler `json:"jaeger_remote,omitempty" yaml:"jaeger_remote,omitempty" mapstructure:"jaeger_remote,omitempty"` + + // ParentBased corresponds to the JSON schema field "parent_based". + ParentBased *ParentBasedSampler `json:"parent_based,omitempty" yaml:"parent_based,omitempty" mapstructure:"parent_based,omitempty"` + + // TraceIDRatioBased corresponds to the JSON schema field "trace_id_ratio_based". + TraceIDRatioBased *TraceIDRatioBasedSampler `json:"trace_id_ratio_based,omitempty" yaml:"trace_id_ratio_based,omitempty" mapstructure:"trace_id_ratio_based,omitempty"` + + AdditionalProperties interface{} `mapstructure:",remain"` +} + +type SimpleLogRecordProcessor struct { + // Exporter corresponds to the JSON schema field "exporter". + Exporter LogRecordExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` +} + +type SimpleSpanProcessor struct { + // Exporter corresponds to the JSON schema field "exporter". + Exporter SpanExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` +} + +type SpanExporter struct { + // Console corresponds to the JSON schema field "console". + Console ConsoleExporter `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` + + // OTLPFileDevelopment corresponds to the JSON schema field + // "otlp_file/development". 
+ OTLPFileDevelopment *ExperimentalOTLPFileExporter `json:"otlp_file/development,omitempty" yaml:"otlp_file/development,omitempty" mapstructure:"otlp_file/development,omitempty"` + + // OTLPGrpc corresponds to the JSON schema field "otlp_grpc". + OTLPGrpc *OTLPGrpcExporter `json:"otlp_grpc,omitempty" yaml:"otlp_grpc,omitempty" mapstructure:"otlp_grpc,omitempty"` + + // OTLPHttp corresponds to the JSON schema field "otlp_http". + OTLPHttp *OTLPHttpExporter `json:"otlp_http,omitempty" yaml:"otlp_http,omitempty" mapstructure:"otlp_http,omitempty"` + + // Zipkin corresponds to the JSON schema field "zipkin". + Zipkin *ZipkinSpanExporter `json:"zipkin,omitempty" yaml:"zipkin,omitempty" mapstructure:"zipkin,omitempty"` + + AdditionalProperties interface{} `mapstructure:",remain"` +} + +type SpanLimits struct { + // AttributeCountLimit corresponds to the JSON schema field + // "attribute_count_limit". + AttributeCountLimit *int `json:"attribute_count_limit,omitempty" yaml:"attribute_count_limit,omitempty" mapstructure:"attribute_count_limit,omitempty"` + + // AttributeValueLengthLimit corresponds to the JSON schema field + // "attribute_value_length_limit". + AttributeValueLengthLimit *int `json:"attribute_value_length_limit,omitempty" yaml:"attribute_value_length_limit,omitempty" mapstructure:"attribute_value_length_limit,omitempty"` + + // EventAttributeCountLimit corresponds to the JSON schema field + // "event_attribute_count_limit". + EventAttributeCountLimit *int `json:"event_attribute_count_limit,omitempty" yaml:"event_attribute_count_limit,omitempty" mapstructure:"event_attribute_count_limit,omitempty"` + + // EventCountLimit corresponds to the JSON schema field "event_count_limit". + EventCountLimit *int `json:"event_count_limit,omitempty" yaml:"event_count_limit,omitempty" mapstructure:"event_count_limit,omitempty"` + + // LinkAttributeCountLimit corresponds to the JSON schema field + // "link_attribute_count_limit". 
+ LinkAttributeCountLimit *int `json:"link_attribute_count_limit,omitempty" yaml:"link_attribute_count_limit,omitempty" mapstructure:"link_attribute_count_limit,omitempty"` + + // LinkCountLimit corresponds to the JSON schema field "link_count_limit". + LinkCountLimit *int `json:"link_count_limit,omitempty" yaml:"link_count_limit,omitempty" mapstructure:"link_count_limit,omitempty"` +} + +type SpanProcessor struct { + // Batch corresponds to the JSON schema field "batch". + Batch *BatchSpanProcessor `json:"batch,omitempty" yaml:"batch,omitempty" mapstructure:"batch,omitempty"` + + // Simple corresponds to the JSON schema field "simple". + Simple *SimpleSpanProcessor `json:"simple,omitempty" yaml:"simple,omitempty" mapstructure:"simple,omitempty"` + + AdditionalProperties interface{} `mapstructure:",remain"` +} + +type SumAggregation map[string]interface{} + +type TextMapPropagator struct { + // B3 corresponds to the JSON schema field "b3". + B3 B3Propagator `json:"b3,omitempty" yaml:"b3,omitempty" mapstructure:"b3,omitempty"` + + // B3Multi corresponds to the JSON schema field "b3multi". + B3Multi B3MultiPropagator `json:"b3multi,omitempty" yaml:"b3multi,omitempty" mapstructure:"b3multi,omitempty"` + + // Baggage corresponds to the JSON schema field "baggage". + Baggage BaggagePropagator `json:"baggage,omitempty" yaml:"baggage,omitempty" mapstructure:"baggage,omitempty"` + + // Jaeger corresponds to the JSON schema field "jaeger". + Jaeger JaegerPropagator `json:"jaeger,omitempty" yaml:"jaeger,omitempty" mapstructure:"jaeger,omitempty"` + + // Ottrace corresponds to the JSON schema field "ottrace". + Ottrace OpenTracingPropagator `json:"ottrace,omitempty" yaml:"ottrace,omitempty" mapstructure:"ottrace,omitempty"` + + // Tracecontext corresponds to the JSON schema field "tracecontext". 
+ Tracecontext TraceContextPropagator `json:"tracecontext,omitempty" yaml:"tracecontext,omitempty" mapstructure:"tracecontext,omitempty"` + + AdditionalProperties interface{} `mapstructure:",remain"` +} + +type TraceContextPropagator map[string]interface{} + +type TraceIDRatioBasedSampler struct { + // Ratio corresponds to the JSON schema field "ratio". + Ratio *float64 `json:"ratio,omitempty" yaml:"ratio,omitempty" mapstructure:"ratio,omitempty"` +} + +type TracerProviderJson struct { + // Limits corresponds to the JSON schema field "limits". + Limits *SpanLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + + // Processors corresponds to the JSON schema field "processors". + Processors []SpanProcessor `json:"processors" yaml:"processors" mapstructure:"processors"` + + // Sampler corresponds to the JSON schema field "sampler". + Sampler *Sampler `json:"sampler,omitempty" yaml:"sampler,omitempty" mapstructure:"sampler,omitempty"` + + // TracerConfiguratorDevelopment corresponds to the JSON schema field + // "tracer_configurator/development". + TracerConfiguratorDevelopment *ExperimentalTracerConfigurator `json:"tracer_configurator/development,omitempty" yaml:"tracer_configurator/development,omitempty" mapstructure:"tracer_configurator/development,omitempty"` +} + +type View struct { + // Selector corresponds to the JSON schema field "selector". + Selector *ViewSelector `json:"selector,omitempty" yaml:"selector,omitempty" mapstructure:"selector,omitempty"` + + // Stream corresponds to the JSON schema field "stream". + Stream *ViewStream `json:"stream,omitempty" yaml:"stream,omitempty" mapstructure:"stream,omitempty"` +} + +type ViewSelector struct { + // InstrumentName corresponds to the JSON schema field "instrument_name". 
+ InstrumentName *string `json:"instrument_name,omitempty" yaml:"instrument_name,omitempty" mapstructure:"instrument_name,omitempty"` + + // InstrumentType corresponds to the JSON schema field "instrument_type". + InstrumentType *InstrumentType `json:"instrument_type,omitempty" yaml:"instrument_type,omitempty" mapstructure:"instrument_type,omitempty"` + + // MeterName corresponds to the JSON schema field "meter_name". + MeterName *string `json:"meter_name,omitempty" yaml:"meter_name,omitempty" mapstructure:"meter_name,omitempty"` + + // MeterSchemaUrl corresponds to the JSON schema field "meter_schema_url". + MeterSchemaUrl *string `json:"meter_schema_url,omitempty" yaml:"meter_schema_url,omitempty" mapstructure:"meter_schema_url,omitempty"` + + // MeterVersion corresponds to the JSON schema field "meter_version". + MeterVersion *string `json:"meter_version,omitempty" yaml:"meter_version,omitempty" mapstructure:"meter_version,omitempty"` + + // Unit corresponds to the JSON schema field "unit". + Unit *string `json:"unit,omitempty" yaml:"unit,omitempty" mapstructure:"unit,omitempty"` +} + +type ViewStream struct { + // Aggregation corresponds to the JSON schema field "aggregation". + Aggregation *Aggregation `json:"aggregation,omitempty" yaml:"aggregation,omitempty" mapstructure:"aggregation,omitempty"` + + // AggregationCardinalityLimit corresponds to the JSON schema field + // "aggregation_cardinality_limit". + AggregationCardinalityLimit *int `json:"aggregation_cardinality_limit,omitempty" yaml:"aggregation_cardinality_limit,omitempty" mapstructure:"aggregation_cardinality_limit,omitempty"` + + // AttributeKeys corresponds to the JSON schema field "attribute_keys". + AttributeKeys *IncludeExclude `json:"attribute_keys,omitempty" yaml:"attribute_keys,omitempty" mapstructure:"attribute_keys,omitempty"` + + // Description corresponds to the JSON schema field "description". 
+ Description *string `json:"description,omitempty" yaml:"description,omitempty" mapstructure:"description,omitempty"` + + // Name corresponds to the JSON schema field "name". + Name *string `json:"name,omitempty" yaml:"name,omitempty" mapstructure:"name,omitempty"` +} + +type ZipkinSpanExporter struct { + // Endpoint corresponds to the JSON schema field "endpoint". + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // Timeout corresponds to the JSON schema field "timeout". + Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` +} diff --git a/otelconf/x/go.mod b/otelconf/x/go.mod new file mode 100644 index 00000000000..3c7227c619b --- /dev/null +++ b/otelconf/x/go.mod @@ -0,0 +1,3 @@ +module go.opentelemetry.io/contrib/otelconf/x + +go 1.24.0