diff --git a/Makefile b/Makefile
index 5a0d611e0fd..b3c2664d19d 100644
--- a/Makefile
+++ b/Makefile
@@ -323,7 +323,7 @@ update-all-otel-deps:
 OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_SRC_DIR=tmp/opentelemetry-configuration
 
 # The SHA matching the current version of the opentelemetry-configuration schema to use
-OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_VERSION=v0.3.0
+OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_VERSION=v0.4.0
 
 # Cleanup temporary directory
 genjsonschema-cleanup:
diff --git a/otelconf/v0.4.0/config.go b/otelconf/v0.4.0/config.go
new file mode 100644
index 00000000000..5c82a88e9ee
--- /dev/null
+++ b/otelconf/v0.4.0/config.go
@@ -0,0 +1,207 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package otelconf provides an OpenTelemetry declarative configuration SDK.
+package otelconf // import "go.opentelemetry.io/contrib/otelconf/v0.4.0"
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"os"
+
+	yaml "sigs.k8s.io/yaml/goyaml.v3"
+
+	"go.opentelemetry.io/otel/baggage"
+	"go.opentelemetry.io/otel/log"
+	nooplog "go.opentelemetry.io/otel/log/noop"
+	"go.opentelemetry.io/otel/metric"
+	noopmetric "go.opentelemetry.io/otel/metric/noop"
+	"go.opentelemetry.io/otel/trace"
+	nooptrace "go.opentelemetry.io/otel/trace/noop"
+)
+
+const (
+	compressionGzip = "gzip"
+	compressionNone = "none"
+)
+
+type configOptions struct {
+	ctx                 context.Context
+	opentelemetryConfig OpenTelemetryConfiguration
+}
+
+type shutdownFunc func(context.Context) error
+
+func noopShutdown(context.Context) error {
+	return nil
+}
+
+// SDK is a struct that contains all the providers
+// configured via the configuration model.
+type SDK struct {
+	meterProvider  metric.MeterProvider
+	tracerProvider trace.TracerProvider
+	loggerProvider log.LoggerProvider
+	shutdown       shutdownFunc
+}
+
+// TracerProvider returns a configured trace.TracerProvider.
+func (s *SDK) TracerProvider() trace.TracerProvider {
+	return s.tracerProvider
+}
+
+// MeterProvider returns a configured metric.MeterProvider.
+func (s *SDK) MeterProvider() metric.MeterProvider {
+	return s.meterProvider
+}
+
+// LoggerProvider returns a configured log.LoggerProvider.
+func (s *SDK) LoggerProvider() log.LoggerProvider {
+	return s.loggerProvider
+}
+
+// Shutdown calls shutdown on all configured providers.
+func (s *SDK) Shutdown(ctx context.Context) error {
+	return s.shutdown(ctx)
+}
+
+var noopSDK = SDK{
+	loggerProvider: nooplog.LoggerProvider{},
+	meterProvider:  noopmetric.MeterProvider{},
+	tracerProvider: nooptrace.TracerProvider{},
+	shutdown:       func(ctx context.Context) error { return nil },
+}
+
+// NewSDK creates SDK providers based on the configuration model.
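+//
+// A minimal usage sketch (illustrative only; configBytes is a placeholder for
+// the raw bytes of a declarative configuration document):
+//
+//	cfg, err := ParseYAML(configBytes)
+//	if err != nil {
+//		panic(err)
+//	}
+//	sdk, err := NewSDK(WithOpenTelemetryConfiguration(*cfg))
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer func() { _ = sdk.Shutdown(context.Background()) }()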
+func NewSDK(opts ...ConfigurationOption) (SDK, error) {
+	o := configOptions{}
+	for _, opt := range opts {
+		o = opt.apply(o)
+	}
+	if o.opentelemetryConfig.Disabled != nil && *o.opentelemetryConfig.Disabled {
+		return noopSDK, nil
+	}
+
+	r := newResource(o.opentelemetryConfig.Resource)
+
+	mp, mpShutdown, err := meterProvider(o, r)
+	if err != nil {
+		return noopSDK, err
+	}
+
+	tp, tpShutdown, err := tracerProvider(o, r)
+	if err != nil {
+		return noopSDK, err
+	}
+
+	lp, lpShutdown, err := loggerProvider(o, r)
+	if err != nil {
+		return noopSDK, err
+	}
+
+	return SDK{
+		meterProvider:  mp,
+		tracerProvider: tp,
+		loggerProvider: lp,
+		shutdown: func(ctx context.Context) error {
+			return errors.Join(mpShutdown(ctx), tpShutdown(ctx), lpShutdown(ctx))
+		},
+	}, nil
+}
+
+// ConfigurationOption configures options for providers.
+type ConfigurationOption interface {
+	apply(configOptions) configOptions
+}
+
+type configurationOptionFunc func(configOptions) configOptions
+
+func (fn configurationOptionFunc) apply(cfg configOptions) configOptions {
+	return fn(cfg)
+}
+
+// WithContext sets the context.Context for the SDK.
+func WithContext(ctx context.Context) ConfigurationOption {
+	return configurationOptionFunc(func(c configOptions) configOptions {
+		c.ctx = ctx
+		return c
+	})
+}
+
+// WithOpenTelemetryConfiguration sets the OpenTelemetryConfiguration used
+// to produce the SDK.
+func WithOpenTelemetryConfiguration(cfg OpenTelemetryConfiguration) ConfigurationOption {
+	return configurationOptionFunc(func(c configOptions) configOptions {
+		c.opentelemetryConfig = cfg
+		return c
+	})
+}
+
+// ParseYAML parses a YAML configuration file into an OpenTelemetryConfiguration.
+func ParseYAML(file []byte) (*OpenTelemetryConfiguration, error) {
+	var cfg OpenTelemetryConfiguration
+	err := yaml.Unmarshal(file, &cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	return &cfg, nil
+}
+
+// createTLSConfig creates a tls.Config from certificate files.
+func createTLSConfig(caCertFile *string, clientCertFile *string, clientKeyFile *string) (*tls.Config, error) {
+	tlsConfig := &tls.Config{}
+	if caCertFile != nil {
+		caText, err := os.ReadFile(*caCertFile)
+		if err != nil {
+			return nil, err
+		}
+		certPool := x509.NewCertPool()
+		if !certPool.AppendCertsFromPEM(caText) {
+			return nil, errors.New("could not create certificate authority chain from certificate")
+		}
+		tlsConfig.RootCAs = certPool
+	}
+	if clientCertFile != nil {
+		if clientKeyFile == nil {
+			return nil, errors.New("client certificate was provided but no client key was provided")
+		}
+		clientCert, err := tls.LoadX509KeyPair(*clientCertFile, *clientKeyFile)
+		if err != nil {
+			return nil, fmt.Errorf("could not use client certificate: %w", err)
+		}
+		tlsConfig.Certificates = []tls.Certificate{clientCert}
+	}
+	return tlsConfig, nil
+}
+
+// createHeadersConfig combines the two header config fields. Headers take precedence over headersList.
+func createHeadersConfig(headers []NameStringValuePair, headersList *string) (map[string]string, error) {
+	result := make(map[string]string)
+	if headersList != nil {
+		// Parsing follows https://github.com/open-telemetry/opentelemetry-configuration/blob/568e5080816d40d75792eb754fc96bde09654159/schema/type_descriptions.yaml#L584.
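+		// For example, a headersList of "api-key=1234,other-key=42" parses into
+		// {"api-key": "1234", "other-key": "42"}: comma-separated key=value
+		// pairs in W3C Baggage form.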
+		headerslist, err := baggage.Parse(*headersList)
+		if err != nil {
+			return nil, fmt.Errorf("invalid headers list: %w", err)
+		}
+		for _, kv := range headerslist.Members() {
+			result[kv.Key()] = kv.Value()
+		}
+	}
+	// Headers take precedence over HeadersList, so this has to be after HeadersList is processed
+	if len(headers) > 0 {
+		for _, kv := range headers {
+			if kv.Value != nil {
+				result[kv.Name] = *kv.Value
+			}
+		}
+	}
+	return result, nil
+}
diff --git a/otelconf/v0.4.0/config_json.go b/otelconf/v0.4.0/config_json.go
new file mode 100644
index 00000000000..8693279292e
--- /dev/null
+++ b/otelconf/v0.4.0/config_json.go
@@ -0,0 +1,424 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelconf // import "go.opentelemetry.io/contrib/otelconf/v0.4.0"
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"reflect"
+)
+
+// MarshalJSON implements json.Marshaler.
+func (j *AttributeType) MarshalJSON() ([]byte, error) {
+	return json.Marshal(j.Value)
+}
+
+var enumValuesAttributeNameValueType = []interface{}{
+	nil,
+	"string",
+	"bool",
+	"int",
+	"double",
+	"string_array",
+	"bool_array",
+	"int_array",
+	"double_array",
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *AttributeType) UnmarshalJSON(b []byte) error {
+	var v struct {
+		Value interface{}
+	}
+	if err := json.Unmarshal(b, &v.Value); err != nil {
+		return err
+	}
+	var ok bool
+	for _, expected := range enumValuesAttributeNameValueType {
+		if reflect.DeepEqual(v.Value, expected) {
+			ok = true
+			break
+		}
+	}
+	if !ok {
+		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValuesAttributeNameValueType, v.Value)
+	}
+	*j = AttributeType(v)
+	return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *BatchLogRecordProcessor) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if _, ok := raw["exporter"]; raw != nil && !ok {
+		return errors.New("field exporter in BatchLogRecordProcessor: required")
+	}
+	type Plain BatchLogRecordProcessor
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = BatchLogRecordProcessor(plain)
+	return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *BatchSpanProcessor) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if _, ok := raw["exporter"]; raw != nil && !ok {
+		return errors.New("field exporter in BatchSpanProcessor: required")
+	}
+	type Plain BatchSpanProcessor
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = BatchSpanProcessor(plain)
+	return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
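+// It rejects objects that omit the required "peer" or "service" keys, so
+// {"peer": "1.2.3.4", "service": "FooService"} is accepted while
+// {"peer": "1.2.3.4"} alone is not.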
+func (j *ExperimentalPeerInstrumentationServiceMappingElem) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if _, ok := raw["peer"]; raw != nil && !ok {
+		return errors.New("field peer in ExperimentalPeerInstrumentationServiceMappingElem: required")
+	}
+	if _, ok := raw["service"]; raw != nil && !ok {
+		return errors.New("field service in ExperimentalPeerInstrumentationServiceMappingElem: required")
+	}
+	type Plain ExperimentalPeerInstrumentationServiceMappingElem
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = ExperimentalPeerInstrumentationServiceMappingElem(plain)
+	return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *NameStringValuePair) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if _, ok := raw["name"]; !ok {
+		return errors.New("json: cannot unmarshal field name in NameStringValuePair required")
+	}
+	if _, ok := raw["value"]; !ok {
+		return errors.New("json: cannot unmarshal field value in NameStringValuePair required")
+	}
+	var name, value string
+	var ok bool
+	if name, ok = raw["name"].(string); !ok {
+		return errors.New("json: cannot unmarshal field name in NameStringValuePair must be string")
+	}
+	if value, ok = raw["value"].(string); !ok {
+		return errors.New("json: cannot unmarshal field value in NameStringValuePair must be string")
+	}
+
+	*j = NameStringValuePair{
+		Name: name,
+		Value: &value,
+	}
+	return nil
+}
+
+var enumValuesOTLPMetricDefaultHistogramAggregation = []interface{}{
+	"explicit_bucket_histogram",
+	"base2_exponential_bucket_histogram",
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *ExporterDefaultHistogramAggregation) UnmarshalJSON(b []byte) error {
+	var v string
+	if err := json.Unmarshal(b, &v); err != nil {
+		return err
+	}
+	var ok bool
+	for _, expected := range enumValuesOTLPMetricDefaultHistogramAggregation {
+		if reflect.DeepEqual(v, expected) {
+			ok = true
+			break
+		}
+	}
+	if !ok {
+		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValuesOTLPMetricDefaultHistogramAggregation, v)
+	}
+	*j = ExporterDefaultHistogramAggregation(v)
+	return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *OTLPHttpMetricExporter) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if _, ok := raw["endpoint"]; raw != nil && !ok {
+		return errors.New("field endpoint in OTLPMetric: required")
+	}
+	if _, ok := raw["protocol"]; raw != nil && !ok {
+		return errors.New("field protocol in OTLPMetric: required")
+	}
+	type Plain OTLPHttpMetricExporter
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = OTLPHttpMetricExporter(plain)
+	return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
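+// As with the HTTP variant above, the required "endpoint" and "protocol" keys
+// are checked against the raw map before the struct itself is decoded.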
+func (j *OTLPGrpcMetricExporter) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if _, ok := raw["endpoint"]; raw != nil && !ok {
+		return errors.New("field endpoint in OTLPMetric: required")
+	}
+	if _, ok := raw["protocol"]; raw != nil && !ok {
+		return errors.New("field protocol in OTLPMetric: required")
+	}
+	type Plain OTLPGrpcMetricExporter
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = OTLPGrpcMetricExporter(plain)
+	return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *OTLPHttpExporter) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if _, ok := raw["endpoint"]; raw != nil && !ok {
+		return errors.New("field endpoint in OTLP: required")
+	}
+	if _, ok := raw["protocol"]; raw != nil && !ok {
+		return errors.New("field protocol in OTLP: required")
+	}
+	type Plain OTLPHttpExporter
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = OTLPHttpExporter(plain)
+	return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *OTLPGrpcExporter) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if _, ok := raw["endpoint"]; raw != nil && !ok {
+		return errors.New("field endpoint in OTLP: required")
+	}
+	if _, ok := raw["protocol"]; raw != nil && !ok {
+		return errors.New("field protocol in OTLP: required")
+	}
+	type Plain OTLPGrpcExporter
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = OTLPGrpcExporter(plain)
+	return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *OpenTelemetryConfiguration) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if _, ok := raw["file_format"]; raw != nil && !ok {
+		return errors.New("field file_format in OpenTelemetryConfiguration: required")
+	}
+	type Plain OpenTelemetryConfiguration
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = OpenTelemetryConfiguration(plain)
+	return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *PeriodicMetricReader) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if _, ok := raw["exporter"]; raw != nil && !ok {
+		return errors.New("field exporter in PeriodicMetricReader: required")
+	}
+	type Plain PeriodicMetricReader
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = PeriodicMetricReader(plain)
+	return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *PullMetricReader) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if _, ok := raw["exporter"]; raw != nil && !ok {
+		return errors.New("field exporter in PullMetricReader: required")
+	}
+	type Plain PullMetricReader
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = PullMetricReader(plain)
+	return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
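+// The local Plain alias used below drops the custom unmarshaler from the
+// type, so the inner json.Unmarshal decodes the struct body without recursing
+// back into this method; the same pattern appears throughout this file.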
+func (j *SimpleLogRecordProcessor) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if _, ok := raw["exporter"]; raw != nil && !ok {
+		return errors.New("field exporter in SimpleLogRecordProcessor: required")
+	}
+	type Plain SimpleLogRecordProcessor
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = SimpleLogRecordProcessor(plain)
+	return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SimpleSpanProcessor) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if _, ok := raw["exporter"]; raw != nil && !ok {
+		return errors.New("field exporter in SimpleSpanProcessor: required")
+	}
+	type Plain SimpleSpanProcessor
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = SimpleSpanProcessor(plain)
+	return nil
+}
+
+var enumValuesViewSelectorInstrumentType = []interface{}{
+	"counter",
+	"gauge",
+	"histogram",
+	"observable_counter",
+	"observable_gauge",
+	"observable_up_down_counter",
+	"up_down_counter",
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *InstrumentType) UnmarshalJSON(b []byte) error {
+	var v string
+	if err := json.Unmarshal(b, &v); err != nil {
+		return err
+	}
+	var ok bool
+	for _, expected := range enumValuesViewSelectorInstrumentType {
+		if reflect.DeepEqual(v, expected) {
+			ok = true
+			break
+		}
+	}
+	if !ok {
+		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValuesViewSelectorInstrumentType, v)
+	}
+	*j = InstrumentType(v)
+	return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *ZipkinSpanExporter) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if _, ok := raw["endpoint"]; raw != nil && !ok {
+		return errors.New("field endpoint in Zipkin: required")
+	}
+	type Plain ZipkinSpanExporter
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = ZipkinSpanExporter(plain)
+	return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
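+// encoding/json decodes every JSON number as float64, so values declared with
+// type "int" or "int_array" are coerced back to Go ints below; e.g.
+// {"name": "int_key", "type": "int", "value": 1} yields Value == int(1).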
+func (j *AttributeNameValue) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if _, ok := raw["name"]; raw != nil && !ok {
+		return errors.New("field name in AttributeNameValue: required")
+	}
+	if _, ok := raw["value"]; raw != nil && !ok {
+		return errors.New("field value in AttributeNameValue: required")
+	}
+	type Plain AttributeNameValue
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	if plain.Type != nil && plain.Type.Value == "int" {
+		val, ok := plain.Value.(float64)
+		if ok {
+			plain.Value = int(val)
+		}
+	}
+	if plain.Type != nil && plain.Type.Value == "int_array" {
+		m, ok := plain.Value.([]interface{})
+		if ok {
+			var vals []interface{}
+			for _, v := range m {
+				val, ok := v.(float64)
+				if ok {
+					vals = append(vals, int(val))
+				} else {
+					vals = append(vals, v)
+				}
+			}
+			plain.Value = vals
+		}
+	}
+
+	*j = AttributeNameValue(plain)
+	return nil
+}
diff --git a/otelconf/v0.4.0/config_test.go b/otelconf/v0.4.0/config_test.go
new file mode 100644
index 00000000000..91593803a23
--- /dev/null
+++ b/otelconf/v0.4.0/config_test.go
@@ -0,0 +1,654 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelconf
+
+import (
+	"context"
+	"crypto/tls"
+	"encoding/json"
+	"errors"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	lognoop "go.opentelemetry.io/otel/log/noop"
+	metricnoop "go.opentelemetry.io/otel/metric/noop"
+	sdklog "go.opentelemetry.io/otel/sdk/log"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+	tracenoop "go.opentelemetry.io/otel/trace/noop"
+)
+
+func TestNewSDK(t *testing.T) {
+	tests := []struct {
+		name string
+		cfg []ConfigurationOption
+		wantTracerProvider any
+		wantMeterProvider any
+		wantLoggerProvider any
+		wantErr error
+		wantShutdownErr error
+	}{
+		{
+			name: "no-configuration",
+			wantTracerProvider: tracenoop.NewTracerProvider(),
+			wantMeterProvider: metricnoop.NewMeterProvider(),
+			wantLoggerProvider: lognoop.NewLoggerProvider(),
+		},
+		{
+			name: "with-configuration",
+			cfg: []ConfigurationOption{
+				WithContext(context.Background()),
+				WithOpenTelemetryConfiguration(OpenTelemetryConfiguration{
+					TracerProvider: &TracerProviderJson{},
+					MeterProvider: &MeterProviderJson{},
+					LoggerProvider: &LoggerProviderJson{},
+				}),
+			},
+			wantTracerProvider: &sdktrace.TracerProvider{},
+			wantMeterProvider: &sdkmetric.MeterProvider{},
+			wantLoggerProvider: &sdklog.LoggerProvider{},
+		},
+		{
+			name: "with-sdk-disabled",
+			cfg: []ConfigurationOption{
+				WithContext(context.Background()),
+				WithOpenTelemetryConfiguration(OpenTelemetryConfiguration{
+					Disabled: ptr(true),
+					TracerProvider: &TracerProviderJson{},
+					MeterProvider: &MeterProviderJson{},
+					LoggerProvider: &LoggerProviderJson{},
+				}),
+			},
+			wantTracerProvider: tracenoop.NewTracerProvider(),
+			wantMeterProvider: metricnoop.NewMeterProvider(),
+			wantLoggerProvider: lognoop.NewLoggerProvider(),
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			sdk, err := NewSDK(tt.cfg...)
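+			// Comparing concrete provider types shows whether a real or a
+			// noop SDK was constructed for each case.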
+			require.Equal(t, tt.wantErr, err)
+			assert.IsType(t, tt.wantTracerProvider, sdk.TracerProvider())
+			assert.IsType(t, tt.wantMeterProvider, sdk.MeterProvider())
+			assert.IsType(t, tt.wantLoggerProvider, sdk.LoggerProvider())
+			require.Equal(t, tt.wantShutdownErr, sdk.Shutdown(context.Background()))
+		})
+	}
+}
+
+var v04OpenTelemetryConfig = OpenTelemetryConfiguration{
+	Disabled: ptr(false),
+	FileFormat: "0.4",
+	AttributeLimits: &AttributeLimits{
+		AttributeCountLimit: ptr(128),
+		AttributeValueLengthLimit: ptr(4096),
+	},
+	InstrumentationDevelopment: &InstrumentationJson{
+		Cpp: ExperimentalLanguageSpecificInstrumentation{
+			"example": map[string]interface{}{
+				"property": "value",
+			},
+		},
+		Dotnet: ExperimentalLanguageSpecificInstrumentation{
+			"example": map[string]interface{}{
+				"property": "value",
+			},
+		},
+		Erlang: ExperimentalLanguageSpecificInstrumentation{
+			"example": map[string]interface{}{
+				"property": "value",
+			},
+		},
+		General: &ExperimentalGeneralInstrumentation{
+			Http: &ExperimentalHttpInstrumentation{
+				Client: &ExperimentalHttpInstrumentationClient{
+					RequestCapturedHeaders: []string{"Content-Type", "Accept"},
+					ResponseCapturedHeaders: []string{"Content-Type", "Content-Encoding"},
+				},
+				Server: &ExperimentalHttpInstrumentationServer{
+					RequestCapturedHeaders: []string{"Content-Type", "Accept"},
+					ResponseCapturedHeaders: []string{"Content-Type", "Content-Encoding"},
+				},
+			},
+			Peer: &ExperimentalPeerInstrumentation{
+				ServiceMapping: []ExperimentalPeerInstrumentationServiceMappingElem{
+					{Peer: "1.2.3.4", Service: "FooService"},
+					{Peer: "2.3.4.5", Service: "BarService"},
+				},
+			},
+		},
+		Go: ExperimentalLanguageSpecificInstrumentation{
+			"example": map[string]interface{}{
+				"property": "value",
+			},
+		},
+		Java: ExperimentalLanguageSpecificInstrumentation{
+			"example": map[string]interface{}{
+				"property": "value",
+			},
+		},
+		Js: ExperimentalLanguageSpecificInstrumentation{
+			"example": map[string]interface{}{
+				"property": "value",
+			},
+		},
+		Php: ExperimentalLanguageSpecificInstrumentation{
+			"example": map[string]interface{}{
+				"property": "value",
+			},
+		},
+		Python: ExperimentalLanguageSpecificInstrumentation{
+			"example": map[string]interface{}{
+				"property": "value",
+			},
+		},
+		Ruby: ExperimentalLanguageSpecificInstrumentation{
+			"example": map[string]interface{}{
+				"property": "value",
+			},
+		},
+		Rust: ExperimentalLanguageSpecificInstrumentation{
+			"example": map[string]interface{}{
+				"property": "value",
+			},
+		},
+		Swift: ExperimentalLanguageSpecificInstrumentation{
+			"example": map[string]interface{}{
+				"property": "value",
+			},
+		},
+	},
+	LoggerProvider: &LoggerProviderJson{
+		Limits: &LogRecordLimits{
+			AttributeCountLimit: ptr(128),
+			AttributeValueLengthLimit: ptr(4096),
+		},
+		Processors: []LogRecordProcessor{
+			{
+				Batch: &BatchLogRecordProcessor{
+					ExportTimeout: ptr(30000),
+					Exporter: LogRecordExporter{
+						OTLPHttp: &OTLPHttpExporter{
+							CertificateFile: ptr("/app/cert.pem"),
+							ClientCertificateFile: ptr("/app/cert.pem"),
+							ClientKeyFile: ptr("/app/cert.pem"),
+							Compression: ptr("gzip"),
+							Endpoint: ptr("http://localhost:4318/v1/logs"),
+							Headers: []NameStringValuePair{
+								{Name: "api-key", Value: ptr("1234")},
+							},
+							HeadersList: ptr("api-key=1234"),
+							Timeout: ptr(10000),
+						},
+					},
+					MaxExportBatchSize: ptr(512),
+					MaxQueueSize: ptr(2048),
+					ScheduleDelay: ptr(5000),
+				},
+			},
+			{
+				Simple: &SimpleLogRecordProcessor{
+					Exporter: LogRecordExporter{
+						Console: ConsoleExporter{},
+					},
+				},
+			},
+		},
+	},
+	MeterProvider: &MeterProviderJson{
+		Readers: []MetricReader{
+			{
+				Pull: &PullMetricReader{
+					Exporter: PullMetricExporter{
+						PrometheusDevelopment: &ExperimentalPrometheusMetricExporter{
+							Host: ptr("localhost"),
+							Port: ptr(9464),
+							WithResourceConstantLabels: &IncludeExclude{
+								Excluded: []string{"service.attr1"},
+								Included: []string{"service*"},
+							},
+							WithoutScopeInfo: ptr(false),
+							WithoutTypeSuffix: ptr(false),
+							WithoutUnits: ptr(false),
+						},
+					},
+				},
+			},
+			{
+				Periodic: &PeriodicMetricReader{
+					Exporter: PushMetricExporter{
+						OTLPHttp: &OTLPHttpMetricExporter{
+							CertificateFile: ptr("/app/cert.pem"),
+							ClientCertificateFile: ptr("/app/cert.pem"),
+							ClientKeyFile: ptr("/app/cert.pem"),
+							Compression: ptr("gzip"),
+							DefaultHistogramAggregation: ptr(ExporterDefaultHistogramAggregationBase2ExponentialBucketHistogram),
+							Endpoint: ptr("http://localhost:4318/v1/metrics"),
+							Headers: []NameStringValuePair{
+								{Name: "api-key", Value: ptr("1234")},
+							},
+							HeadersList: ptr("api-key=1234"),
+							TemporalityPreference: ptr(ExporterTemporalityPreferenceDelta),
+							Timeout: ptr(10000),
+						},
+					},
+					Interval: ptr(5000),
+					Timeout: ptr(30000),
+				},
+			},
+			{
+				Periodic: &PeriodicMetricReader{
+					Exporter: PushMetricExporter{
+						Console: ConsoleExporter{},
+					},
+				},
+			},
+		},
+		Views: []View{
+			{
+				Selector: &ViewSelector{
+					InstrumentName: ptr("my-instrument"),
+					InstrumentType: ptr(InstrumentTypeHistogram),
+					MeterName: ptr("my-meter"),
+					MeterSchemaUrl: ptr("https://opentelemetry.io/schemas/1.16.0"),
+					MeterVersion: ptr("1.0.0"),
+					Unit: ptr("ms"),
+				},
+				Stream: &ViewStream{
+					Aggregation: &Aggregation{
+						ExplicitBucketHistogram: &ExplicitBucketHistogramAggregation{
+							Boundaries: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+							RecordMinMax: ptr(true),
+						},
+					},
+					AttributeKeys: &IncludeExclude{
+						Included: []string{"key1", "key2"},
+						Excluded: []string{"key3"},
+					},
+					Description: ptr("new_description"),
+					Name: ptr("new_instrument_name"),
+				},
+			},
+		},
+	},
+	Propagator: &PropagatorJson{
+		// Composite: []TextMapPropagator{TraceContextPropagator, ptr("tracecontext"), ptr("baggage"), ptr("b3"), ptr("b3multi"), ptr("jaeger"), ptr("xray"), ptr("ottrace")},
+	},
+	Resource: &ResourceJson{
+		Attributes: []AttributeNameValue{
+			{Name: "service.name", Value: "unknown_service"},
+			{Name: "string_key", Type: &AttributeType{Value: "string"}, Value: "value"},
+			{Name: "bool_key", Type: &AttributeType{Value: "bool"}, Value: true},
+			{Name: "int_key", Type: &AttributeType{Value: "int"}, Value: 1},
+			{Name: "double_key", Type: &AttributeType{Value: "double"}, Value: 1.1},
+			{Name: "string_array_key", Type: &AttributeType{Value: "string_array"}, Value: []interface{}{"value1", "value2"}},
+			{Name: "bool_array_key", Type: &AttributeType{Value: "bool_array"}, Value: []interface{}{true, false}},
+			{Name: "int_array_key", Type: &AttributeType{Value: "int_array"}, Value: []interface{}{1, 2}},
+			{Name: "double_array_key", Type: &AttributeType{Value: "double_array"}, Value: []interface{}{1.1, 2.2}},
+		},
+		AttributesList: ptr("service.namespace=my-namespace,service.version=1.0.0"),
+		DetectionDevelopment: &ExperimentalResourceDetection{
+			Attributes: &IncludeExclude{
+				Excluded: []string{"process.command_args"},
+				Included: []string{"process.*"},
+			},
+		},
+		SchemaUrl: ptr("https://opentelemetry.io/schemas/1.16.0"),
+	},
+	TracerProvider: &TracerProviderJson{
+		Limits: &SpanLimits{
+			AttributeCountLimit: ptr(128),
+			AttributeValueLengthLimit: ptr(4096),
+			EventCountLimit: ptr(128),
+			EventAttributeCountLimit: ptr(128),
+			LinkCountLimit: ptr(128),
+			LinkAttributeCountLimit: ptr(128),
+		},
+		Processors: []SpanProcessor{
+			{
+				Batch: &BatchSpanProcessor{
+					ExportTimeout: ptr(30000),
+					Exporter: SpanExporter{
+						OTLPHttp: &OTLPHttpExporter{
+							CertificateFile: ptr("/app/cert.pem"),
+							ClientCertificateFile: ptr("/app/cert.pem"),
+							ClientKeyFile: ptr("/app/cert.pem"),
+							Compression: ptr("gzip"),
+							Endpoint: ptr("http://localhost:4318/v1/traces"),
+							Headers: []NameStringValuePair{
+								{Name: "api-key", Value: ptr("1234")},
+							},
+							HeadersList: ptr("api-key=1234"),
+							Timeout: ptr(10000),
+						},
+					},
+					MaxExportBatchSize: ptr(512),
+					MaxQueueSize: ptr(2048),
+					ScheduleDelay: ptr(5000),
+				},
+			},
+			{
+				Batch: &BatchSpanProcessor{
+					Exporter: SpanExporter{
+						Zipkin: &ZipkinSpanExporter{
+							Endpoint: ptr("http://localhost:9411/api/v2/spans"),
+							Timeout: ptr(10000),
+						},
+					},
+				},
+			},
+			{
+				Simple: &SimpleSpanProcessor{
+					Exporter: SpanExporter{
+						Console: ConsoleExporter{},
+					},
+				},
+			},
+		},
+		Sampler: &Sampler{
+			ParentBased: &ParentBasedSampler{
+				LocalParentNotSampled: &Sampler{
+					AlwaysOff: AlwaysOffSampler{},
+				},
+				LocalParentSampled: &Sampler{
+					AlwaysOn: AlwaysOnSampler{},
+				},
+				RemoteParentNotSampled: &Sampler{
+					AlwaysOff: AlwaysOffSampler{},
+				},
+				RemoteParentSampled: &Sampler{
+					AlwaysOn: AlwaysOnSampler{},
+				},
+				Root: &Sampler{
+					TraceIDRatioBased: &TraceIDRatioBasedSampler{
+						Ratio: ptr(0.0001),
+					},
+				},
+			},
+		},
+	},
+}
+
+func TestParseYAML(t *testing.T) {
+	tests := []struct {
+		name string
+		input string
+		wantErr error
+		wantType interface{}
+	}{
+		{
+			name: "valid YAML config",
+			input: `valid_empty.yaml`,
+			wantErr: nil,
+			wantType: &OpenTelemetryConfiguration{
+				Disabled: ptr(false),
+				FileFormat: "0.1",
+			},
+		},
+		{
+			name: "invalid config",
+			input: "invalid_bool.yaml",
+			wantErr: errors.New(`yaml: unmarshal errors:
+  line 2: cannot unmarshal !!str ` + "`notabool`" + ` into bool`),
+		},
+		{
+			name: "invalid nil name",
+			input: "invalid_nil_name.yaml",
+			wantErr: errors.New(`yaml: cannot unmarshal field name in NameStringValuePair required`),
+		},
+		{
+			name: "invalid nil value",
+			input: "invalid_nil_value.yaml",
+			wantErr: errors.New(`yaml: cannot unmarshal field value in NameStringValuePair required`),
+		},
+		{
+			name: "valid v0.2 config",
+			input: "v0.2.yaml",
+			wantErr: errors.New(`yaml: unmarshal errors:
+  line 81: cannot unmarshal !!map into []otelconf.NameStringValuePair
+  line 185: cannot unmarshal !!map into []otelconf.NameStringValuePair
+  line 244: cannot unmarshal !!seq into otelconf.IncludeExclude
+  line 305: cannot unmarshal !!map into []otelconf.NameStringValuePair
+  line 408: cannot unmarshal !!map into []otelconf.AttributeNameValue`),
+		},
+		{
+			name: "valid v0.4 config",
+			input: "v0.4.yaml",
+			wantType: &v04OpenTelemetryConfig,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			b, err := os.ReadFile(filepath.Join("..", "testdata", tt.input))
+			require.NoError(t, err)
+
+			got, err := ParseYAML(b)
+			if tt.wantErr != nil {
+				require.Error(t, err)
+				require.Equal(t, tt.wantErr.Error(), err.Error())
+			} else {
+				require.NoError(t, err)
+				assert.Equal(t, tt.wantType, got)
+			}
+		})
+	}
+}
+
+func TestSerializeJSON(t *testing.T) {
+	tests := []struct {
+		name string
+		input string
+		wantErr error
+		wantType interface{}
+	}{
+		{
+			name: "valid JSON config",
+			input: `valid_empty.json`,
+			wantErr: nil,
+			wantType: OpenTelemetryConfiguration{
+				Disabled: ptr(false),
+				FileFormat: "0.1",
+			},
+		},
+		{
config", + input: "invalid_bool.json", + wantErr: errors.New(`json: cannot unmarshal string into Go struct field Plain.disabled of type bool`), + }, + { + name: "invalid nil name", + input: "invalid_nil_name.json", + wantErr: errors.New(`json: cannot unmarshal field name in NameStringValuePair required`), + }, + { + name: "invalid nil value", + input: "invalid_nil_value.json", + wantErr: errors.New(`json: cannot unmarshal field value in NameStringValuePair required`), + }, + { + name: "valid v0.2 config", + input: "v0.2.json", + wantErr: errors.New(`json: cannot unmarshal object into Go struct field LogRecordProcessor.logger_provider.processors.batch`), + }, + { + name: "valid v0.4 config", + input: "v0.4.json", + wantType: v04OpenTelemetryConfig, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b, err := os.ReadFile(filepath.Join("..", "testdata", tt.input)) + require.NoError(t, err) + + var got OpenTelemetryConfiguration + err = json.Unmarshal(b, &got) + + if tt.wantErr != nil { + require.Error(t, err) + require.ErrorContains(t, err, tt.wantErr.Error()) + } else { + require.NoError(t, err) + assert.Equal(t, tt.wantType, got) + } + }) + } +} + +func TestCreateTLSConfig(t *testing.T) { + tests := []struct { + name string + caCertFile *string + clientCertFile *string + clientKeyFile *string + wantErrContains string + want func(*tls.Config, *testing.T) + }{ + { + name: "no-input", + want: func(result *tls.Config, t *testing.T) { + require.Nil(t, result.Certificates) + require.Nil(t, result.RootCAs) + }, + }, + { + name: "only-cacert-provided", + caCertFile: ptr(filepath.Join("..", "testdata", "ca.crt")), + want: func(result *tls.Config, t *testing.T) { + require.Nil(t, result.Certificates) + require.NotNil(t, result.RootCAs) + }, + }, + { + name: "nonexistent-cacert-file", + caCertFile: ptr("nowhere.crt"), + wantErrContains: "open nowhere.crt:", + }, + { + name: "nonexistent-clientcert-file", + clientCertFile: ptr("nowhere.crt"), + clientKeyFile: ptr("nowhere.crt"), + wantErrContains: "could not use client certificate: open nowhere.crt:", + }, + { + name: "bad-cacert-file", + caCertFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + wantErrContains: "could not create certificate authority chain from certificate", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := createTLSConfig(tt.caCertFile, tt.clientCertFile, tt.clientKeyFile) + + if tt.wantErrContains != "" { + require.Contains(t, err.Error(), tt.wantErrContains) + } else { + require.NoError(t, err) + tt.want(got, t) + } + }) + } +} + +func TestCreateHeadersConfig(t *testing.T) { + tests := []struct { + name string + headers []NameStringValuePair + headersList *string + wantHeaders map[string]string + wantErr string + }{ + { + name: "no headers", + headers: []NameStringValuePair{}, + headersList: nil, + wantHeaders: map[string]string{}, + }, + { + name: "headerslist only", + headers: []NameStringValuePair{}, + headersList: ptr("a=b,c=d"), + wantHeaders: map[string]string{ + "a": "b", + "c": "d", + }, + }, + { + name: "headers only", + headers: []NameStringValuePair{ + { + Name: "a", + Value: ptr("b"), + }, + { + Name: "c", + Value: ptr("d"), + }, + }, + headersList: nil, + wantHeaders: map[string]string{ + "a": "b", + "c": "d", + }, + }, + { + name: "both headers and headerslist", + headers: []NameStringValuePair{ + { + Name: "a", + Value: ptr("b"), + }, + }, + headersList: ptr("c=d"), + wantHeaders: map[string]string{ + "a": "b", + "c": "d", + }, + }, + { 
+ name: "headers supersedes headerslist", + headers: []NameStringValuePair{ + { + Name: "a", + Value: ptr("b"), + }, + { + Name: "c", + Value: ptr("override"), + }, + }, + headersList: ptr("c=d"), + wantHeaders: map[string]string{ + "a": "b", + "c": "override", + }, + }, + { + name: "invalid headerslist", + headersList: ptr("==="), + wantErr: "invalid headers list: invalid key: \"\"", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + headersMap, err := createHeadersConfig(tt.headers, tt.headersList) + if tt.wantErr != "" { + require.Error(t, err) + require.Equal(t, tt.wantErr, err.Error()) + } else { + require.NoError(t, err) + } + require.Equal(t, tt.wantHeaders, headersMap) + }) + } +} + +func ptr[T any](v T) *T { + return &v +} diff --git a/otelconf/v0.4.0/config_yaml.go b/otelconf/v0.4.0/config_yaml.go new file mode 100644 index 00000000000..184a11f4da6 --- /dev/null +++ b/otelconf/v0.4.0/config_yaml.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelconf // import "go.opentelemetry.io/contrib/otelconf/v0.3.0" + +import ( + "errors" + "fmt" + "reflect" +) + +// UnmarshalYAML implements yaml.Unmarshaler. +func (j *AttributeType) UnmarshalYAML(unmarshal func(interface{}) error) error { + var v struct { + Value interface{} + } + if err := unmarshal(&v.Value); err != nil { + return err + } + var ok bool + for _, expected := range enumValuesAttributeNameValueType { + if reflect.DeepEqual(v.Value, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValuesAttributeNameValueType, v.Value) + } + *j = AttributeType(v) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (j *NameStringValuePair) UnmarshalYAML(unmarshal func(interface{}) error) error { + var raw map[string]interface{} + if err := unmarshal(&raw); err != nil { + return err + } + if _, ok := raw["name"]; !ok { + return errors.New("yaml: cannot unmarshal field name in NameStringValuePair required") + } + if _, ok := raw["value"]; !ok { + return errors.New("yaml: cannot unmarshal field value in NameStringValuePair required") + } + var name, value string + var ok bool + if name, ok = raw["name"].(string); !ok { + return errors.New("yaml: cannot unmarshal field name in NameStringValuePair must be string") + } + if value, ok = raw["value"].(string); !ok { + return errors.New("yaml: cannot unmarshal field value in NameStringValuePair must be string") + } + *j = NameStringValuePair{ + Name: name, + Value: &value, + } + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. 
+func (j *ExperimentalLanguageSpecificInstrumentation) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var raw map[string]interface{}
+	if err := unmarshal(&raw); err != nil {
+		return err
+	}
+
+	*j = raw
+	return nil
+}
diff --git a/otelconf/v0.4.0/fuzz_test.go b/otelconf/v0.4.0/fuzz_test.go
new file mode 100644
index 00000000000..4bd5c4edf1f
--- /dev/null
+++ b/otelconf/v0.4.0/fuzz_test.go
@@ -0,0 +1,64 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelconf
+
+import (
+	"context"
+	"encoding/json"
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+func FuzzJSON(f *testing.F) {
+	b, err := os.ReadFile(filepath.Join("..", "testdata", "v0.3.json"))
+	require.NoError(f, err)
+	f.Add(b)
+
+	f.Fuzz(func(t *testing.T, data []byte) {
+		t.Log("JSON:\n" + string(data))
+
+		var cfg OpenTelemetryConfiguration
+		err := json.Unmarshal(data, &cfg)
+		if err != nil {
+			return
+		}
+
+		sdk, err := NewSDK(WithOpenTelemetryConfiguration(cfg))
+		if err != nil {
+			return
+		}
+
+		ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
+		defer cancel()
+		_ = sdk.Shutdown(ctx)
+	})
+}
+
+func FuzzYAML(f *testing.F) {
+	b, err := os.ReadFile(filepath.Join("..", "testdata", "v0.3.yaml"))
+	require.NoError(f, err)
+	f.Add(b)
+
+	f.Fuzz(func(t *testing.T, data []byte) {
+		t.Log("YAML:\n" + string(data))
+
+		cfg, err := ParseYAML(data)
+		if err != nil {
+			return
+		}
+
+		sdk, err := NewSDK(WithOpenTelemetryConfiguration(*cfg))
+		if err != nil {
+			return
+		}
+
+		ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
+		defer cancel()
+		_ = sdk.Shutdown(ctx)
+	})
+}
diff --git a/otelconf/v0.4.0/generated_config.go b/otelconf/v0.4.0/generated_config.go
new file mode 100644
index 00000000000..d4cdde95782
--- /dev/null
+++ b/otelconf/v0.4.0/generated_config.go
@@ -0,0 +1,959 @@
+// Code generated by github.com/atombender/go-jsonschema, DO NOT EDIT.
+
+package otelconf
+
+type Aggregation struct {
+	// Base2ExponentialBucketHistogram corresponds to the JSON schema field
+	// "base2_exponential_bucket_histogram".
+	Base2ExponentialBucketHistogram *Base2ExponentialBucketHistogramAggregation `json:"base2_exponential_bucket_histogram,omitempty" yaml:"base2_exponential_bucket_histogram,omitempty" mapstructure:"base2_exponential_bucket_histogram,omitempty"`
+
+	// Default corresponds to the JSON schema field "default".
+	Default DefaultAggregation `json:"default,omitempty" yaml:"default,omitempty" mapstructure:"default,omitempty"`
+
+	// Drop corresponds to the JSON schema field "drop".
+	Drop DropAggregation `json:"drop,omitempty" yaml:"drop,omitempty" mapstructure:"drop,omitempty"`
+
+	// ExplicitBucketHistogram corresponds to the JSON schema field
+	// "explicit_bucket_histogram".
+	ExplicitBucketHistogram *ExplicitBucketHistogramAggregation `json:"explicit_bucket_histogram,omitempty" yaml:"explicit_bucket_histogram,omitempty" mapstructure:"explicit_bucket_histogram,omitempty"`
+
+	// LastValue corresponds to the JSON schema field "last_value".
+	LastValue LastValueAggregation `json:"last_value,omitempty" yaml:"last_value,omitempty" mapstructure:"last_value,omitempty"`
+
+	// Sum corresponds to the JSON schema field "sum".
+	Sum SumAggregation `json:"sum,omitempty" yaml:"sum,omitempty" mapstructure:"sum,omitempty"`
+}
+
+type AlwaysOffSampler map[string]interface{}
+
+type AlwaysOnSampler map[string]interface{}
+
+type AttributeLimits struct {
+	// AttributeCountLimit corresponds to the JSON schema field
+	// "attribute_count_limit".
+	AttributeCountLimit *int `json:"attribute_count_limit,omitempty" yaml:"attribute_count_limit,omitempty" mapstructure:"attribute_count_limit,omitempty"`
+
+	// AttributeValueLengthLimit corresponds to the JSON schema field
+	// "attribute_value_length_limit".
+	AttributeValueLengthLimit *int `json:"attribute_value_length_limit,omitempty" yaml:"attribute_value_length_limit,omitempty" mapstructure:"attribute_value_length_limit,omitempty"`
+
+	AdditionalProperties interface{} `mapstructure:",remain"`
+}
+
+type AttributeNameValue struct {
+	// Name corresponds to the JSON schema field "name".
+	Name string `json:"name" yaml:"name" mapstructure:"name"`
+
+	// Type corresponds to the JSON schema field "type".
+	Type *AttributeType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"`
+
+	// Value corresponds to the JSON schema field "value".
+	Value interface{} `json:"value" yaml:"value" mapstructure:"value"`
+}
+
+type AttributeType struct {
+	Value interface{}
+}
+
+type B3MultiPropagator map[string]interface{}
+
+type B3Propagator map[string]interface{}
+
+type BaggagePropagator map[string]interface{}
+
+type Base2ExponentialBucketHistogramAggregation struct {
+	// MaxScale corresponds to the JSON schema field "max_scale".
+	MaxScale *int `json:"max_scale,omitempty" yaml:"max_scale,omitempty" mapstructure:"max_scale,omitempty"`
+
+	// MaxSize corresponds to the JSON schema field "max_size".
+	MaxSize *int `json:"max_size,omitempty" yaml:"max_size,omitempty" mapstructure:"max_size,omitempty"`
+
+	// RecordMinMax corresponds to the JSON schema field "record_min_max".
+	RecordMinMax *bool `json:"record_min_max,omitempty" yaml:"record_min_max,omitempty" mapstructure:"record_min_max,omitempty"`
+}
+
+type BatchLogRecordProcessor struct {
+	// ExportTimeout corresponds to the JSON schema field "export_timeout".
+	ExportTimeout *int `json:"export_timeout,omitempty" yaml:"export_timeout,omitempty" mapstructure:"export_timeout,omitempty"`
+
+	// Exporter corresponds to the JSON schema field "exporter".
+	Exporter LogRecordExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"`
+
+	// MaxExportBatchSize corresponds to the JSON schema field
+	// "max_export_batch_size".
+	MaxExportBatchSize *int `json:"max_export_batch_size,omitempty" yaml:"max_export_batch_size,omitempty" mapstructure:"max_export_batch_size,omitempty"`
+
+	// MaxQueueSize corresponds to the JSON schema field "max_queue_size".
+	MaxQueueSize *int `json:"max_queue_size,omitempty" yaml:"max_queue_size,omitempty" mapstructure:"max_queue_size,omitempty"`
+
+	// ScheduleDelay corresponds to the JSON schema field "schedule_delay".
+	ScheduleDelay *int `json:"schedule_delay,omitempty" yaml:"schedule_delay,omitempty" mapstructure:"schedule_delay,omitempty"`
+}
+
+type BatchSpanProcessor struct {
+	// ExportTimeout corresponds to the JSON schema field "export_timeout".
+	ExportTimeout *int `json:"export_timeout,omitempty" yaml:"export_timeout,omitempty" mapstructure:"export_timeout,omitempty"`
+
+	// Exporter corresponds to the JSON schema field "exporter".
+	Exporter SpanExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"`
+
+	// MaxExportBatchSize corresponds to the JSON schema field
+	// "max_export_batch_size".
+	MaxExportBatchSize *int `json:"max_export_batch_size,omitempty" yaml:"max_export_batch_size,omitempty" mapstructure:"max_export_batch_size,omitempty"`
+
+	// MaxQueueSize corresponds to the JSON schema field "max_queue_size".
+	MaxQueueSize *int `json:"max_queue_size,omitempty" yaml:"max_queue_size,omitempty" mapstructure:"max_queue_size,omitempty"`
+
+	// ScheduleDelay corresponds to the JSON schema field "schedule_delay".
+	ScheduleDelay *int `json:"schedule_delay,omitempty" yaml:"schedule_delay,omitempty" mapstructure:"schedule_delay,omitempty"`
+}
+
+type CardinalityLimits struct {
+	// Counter corresponds to the JSON schema field "counter".
+	Counter *int `json:"counter,omitempty" yaml:"counter,omitempty" mapstructure:"counter,omitempty"`
+
+	// Default corresponds to the JSON schema field "default".
+	Default *int `json:"default,omitempty" yaml:"default,omitempty" mapstructure:"default,omitempty"`
+
+	// Gauge corresponds to the JSON schema field "gauge".
+	Gauge *int `json:"gauge,omitempty" yaml:"gauge,omitempty" mapstructure:"gauge,omitempty"`
+
+	// Histogram corresponds to the JSON schema field "histogram".
+	Histogram *int `json:"histogram,omitempty" yaml:"histogram,omitempty" mapstructure:"histogram,omitempty"`
+
+	// ObservableCounter corresponds to the JSON schema field "observable_counter".
+	ObservableCounter *int `json:"observable_counter,omitempty" yaml:"observable_counter,omitempty" mapstructure:"observable_counter,omitempty"`
+
+	// ObservableGauge corresponds to the JSON schema field "observable_gauge".
+	ObservableGauge *int `json:"observable_gauge,omitempty" yaml:"observable_gauge,omitempty" mapstructure:"observable_gauge,omitempty"`
+
+	// ObservableUpDownCounter corresponds to the JSON schema field
+	// "observable_up_down_counter".
+	ObservableUpDownCounter *int `json:"observable_up_down_counter,omitempty" yaml:"observable_up_down_counter,omitempty" mapstructure:"observable_up_down_counter,omitempty"`
+
+	// UpDownCounter corresponds to the JSON schema field "up_down_counter".
+	UpDownCounter *int `json:"up_down_counter,omitempty" yaml:"up_down_counter,omitempty" mapstructure:"up_down_counter,omitempty"`
+}
+
+type ConsoleExporter map[string]interface{}
+
+type DefaultAggregation map[string]interface{}
+
+type DropAggregation map[string]interface{}
+
+type ExemplarFilter string
+
+const ExemplarFilterAlwaysOff ExemplarFilter = "always_off"
+const ExemplarFilterAlwaysOn ExemplarFilter = "always_on"
+const ExemplarFilterTraceBased ExemplarFilter = "trace_based"
+
+type ExperimentalGeneralInstrumentation struct {
+	// Http corresponds to the JSON schema field "http".
+	Http *ExperimentalHttpInstrumentation `json:"http,omitempty" yaml:"http,omitempty" mapstructure:"http,omitempty"`
+
+	// Peer corresponds to the JSON schema field "peer".
+	Peer *ExperimentalPeerInstrumentation `json:"peer,omitempty" yaml:"peer,omitempty" mapstructure:"peer,omitempty"`
+}
+
+type ExperimentalHttpInstrumentation struct {
+	// Client corresponds to the JSON schema field "client".
+	Client *ExperimentalHttpInstrumentationClient `json:"client,omitempty" yaml:"client,omitempty" mapstructure:"client,omitempty"`
+
+	// Server corresponds to the JSON schema field "server".
+	Server *ExperimentalHttpInstrumentationServer `json:"server,omitempty" yaml:"server,omitempty" mapstructure:"server,omitempty"`
+}
+
+type ExperimentalHttpInstrumentationClient struct {
+	// RequestCapturedHeaders corresponds to the JSON schema field
+	// "request_captured_headers".
+	RequestCapturedHeaders []string `json:"request_captured_headers,omitempty" yaml:"request_captured_headers,omitempty" mapstructure:"request_captured_headers,omitempty"`
+
+	// ResponseCapturedHeaders corresponds to the JSON schema field
+	// "response_captured_headers".
+	ResponseCapturedHeaders []string `json:"response_captured_headers,omitempty" yaml:"response_captured_headers,omitempty" mapstructure:"response_captured_headers,omitempty"`
+}
+
+type ExperimentalHttpInstrumentationServer struct {
+	// RequestCapturedHeaders corresponds to the JSON schema field
+	// "request_captured_headers".
+	RequestCapturedHeaders []string `json:"request_captured_headers,omitempty" yaml:"request_captured_headers,omitempty" mapstructure:"request_captured_headers,omitempty"`
+
+	// ResponseCapturedHeaders corresponds to the JSON schema field
+	// "response_captured_headers".
+	ResponseCapturedHeaders []string `json:"response_captured_headers,omitempty" yaml:"response_captured_headers,omitempty" mapstructure:"response_captured_headers,omitempty"`
+}
+
+type ExperimentalLanguageSpecificInstrumentation map[string]interface{}
+
+type ExperimentalLoggerConfig struct {
+	// Disabled corresponds to the JSON schema field "disabled".
+	Disabled *bool `json:"disabled,omitempty" yaml:"disabled,omitempty" mapstructure:"disabled,omitempty"`
+}
+
+type ExperimentalLoggerConfigurator struct {
+	// DefaultConfig corresponds to the JSON schema field "default_config".
+	DefaultConfig *ExperimentalLoggerConfig `json:"default_config,omitempty" yaml:"default_config,omitempty" mapstructure:"default_config,omitempty"`
+
+	// Loggers corresponds to the JSON schema field "loggers".
+	Loggers []ExperimentalLoggerMatcherAndConfig `json:"loggers,omitempty" yaml:"loggers,omitempty" mapstructure:"loggers,omitempty"`
+}
+
+type ExperimentalLoggerMatcherAndConfig struct {
+	// Config corresponds to the JSON schema field "config".
+	Config *ExperimentalLoggerConfig `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"`
+
+	// Name corresponds to the JSON schema field "name".
+	Name *string `json:"name,omitempty" yaml:"name,omitempty" mapstructure:"name,omitempty"`
+}
+
+type ExperimentalMeterConfig struct {
+	// Disabled corresponds to the JSON schema field "disabled".
+	Disabled *bool `json:"disabled,omitempty" yaml:"disabled,omitempty" mapstructure:"disabled,omitempty"`
+}
+
+type ExperimentalMeterConfigurator struct {
+	// DefaultConfig corresponds to the JSON schema field "default_config".
+	DefaultConfig *ExperimentalMeterConfig `json:"default_config,omitempty" yaml:"default_config,omitempty" mapstructure:"default_config,omitempty"`
+
+	// Meters corresponds to the JSON schema field "meters".
+	Meters []ExperimentalMeterMatcherAndConfig `json:"meters,omitempty" yaml:"meters,omitempty" mapstructure:"meters,omitempty"`
+}
+
+type ExperimentalMeterMatcherAndConfig struct {
+	// Config corresponds to the JSON schema field "config".
+	Config *ExperimentalMeterConfig `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"`
+
+	// Name corresponds to the JSON schema field "name".
+	Name *string `json:"name,omitempty" yaml:"name,omitempty" mapstructure:"name,omitempty"`
+}
+
+type ExperimentalOTLPFileExporter struct {
+	// OutputStream corresponds to the JSON schema field "output_stream".
+	OutputStream *string `json:"output_stream,omitempty" yaml:"output_stream,omitempty" mapstructure:"output_stream,omitempty"`
+}
+
+type ExperimentalOTLPFileMetricExporter struct {
+	// DefaultHistogramAggregation corresponds to the JSON schema field
+	// "default_histogram_aggregation".
+	DefaultHistogramAggregation *ExporterDefaultHistogramAggregation `json:"default_histogram_aggregation,omitempty" yaml:"default_histogram_aggregation,omitempty" mapstructure:"default_histogram_aggregation,omitempty"`
+
+	// OutputStream corresponds to the JSON schema field "output_stream".
+	OutputStream *string `json:"output_stream,omitempty" yaml:"output_stream,omitempty" mapstructure:"output_stream,omitempty"`
+
+	// TemporalityPreference corresponds to the JSON schema field
+	// "temporality_preference".
+	TemporalityPreference *ExporterTemporalityPreference `json:"temporality_preference,omitempty" yaml:"temporality_preference,omitempty" mapstructure:"temporality_preference,omitempty"`
+}
+
+type ExperimentalPeerInstrumentation struct {
+	// ServiceMapping corresponds to the JSON schema field "service_mapping".
+	ServiceMapping []ExperimentalPeerInstrumentationServiceMappingElem `json:"service_mapping,omitempty" yaml:"service_mapping,omitempty" mapstructure:"service_mapping,omitempty"`
+}
+
+type ExperimentalPeerInstrumentationServiceMappingElem struct {
+	// Peer corresponds to the JSON schema field "peer".
+	Peer string `json:"peer" yaml:"peer" mapstructure:"peer"`
+
+	// Service corresponds to the JSON schema field "service".
+	Service string `json:"service" yaml:"service" mapstructure:"service"`
+}
+
+type ExperimentalPrometheusMetricExporter struct {
+	// Host corresponds to the JSON schema field "host".
+	Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"`
+
+	// Port corresponds to the JSON schema field "port".
+	Port *int `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"`
+
+	// WithResourceConstantLabels corresponds to the JSON schema field
+	// "with_resource_constant_labels".
+	WithResourceConstantLabels *IncludeExclude `json:"with_resource_constant_labels,omitempty" yaml:"with_resource_constant_labels,omitempty" mapstructure:"with_resource_constant_labels,omitempty"`
+
+	// WithoutScopeInfo corresponds to the JSON schema field "without_scope_info".
+	WithoutScopeInfo *bool `json:"without_scope_info,omitempty" yaml:"without_scope_info,omitempty" mapstructure:"without_scope_info,omitempty"`
+
+	// WithoutTypeSuffix corresponds to the JSON schema field "without_type_suffix".
+	WithoutTypeSuffix *bool `json:"without_type_suffix,omitempty" yaml:"without_type_suffix,omitempty" mapstructure:"without_type_suffix,omitempty"`
+
+	// WithoutUnits corresponds to the JSON schema field "without_units".
+	WithoutUnits *bool `json:"without_units,omitempty" yaml:"without_units,omitempty" mapstructure:"without_units,omitempty"`
+}
+
+type ExperimentalResourceDetection struct {
+	// Attributes corresponds to the JSON schema field "attributes".
+	Attributes *IncludeExclude `json:"attributes,omitempty" yaml:"attributes,omitempty" mapstructure:"attributes,omitempty"`
+
+	// Detectors corresponds to the JSON schema field "detectors".
+	Detectors []ExperimentalResourceDetector `json:"detectors,omitempty" yaml:"detectors,omitempty" mapstructure:"detectors,omitempty"`
+}
+
+type ExperimentalResourceDetector map[string]interface{}
+
+type ExperimentalTracerConfig struct {
+	// Disabled corresponds to the JSON schema field "disabled".
+	Disabled bool `json:"disabled" yaml:"disabled" mapstructure:"disabled"`
+}
+
+type ExperimentalTracerConfigurator struct {
+	// DefaultConfig corresponds to the JSON schema field "default_config".
+	DefaultConfig *ExperimentalTracerConfig `json:"default_config,omitempty" yaml:"default_config,omitempty" mapstructure:"default_config,omitempty"`
+
+	// Tracers corresponds to the JSON schema field "tracers".
+	Tracers []ExperimentalTracerMatcherAndConfig `json:"tracers,omitempty" yaml:"tracers,omitempty" mapstructure:"tracers,omitempty"`
+}
+
+type ExperimentalTracerMatcherAndConfig struct {
+	// Config corresponds to the JSON schema field "config".
+	Config ExperimentalTracerConfig `json:"config" yaml:"config" mapstructure:"config"`
+
+	// Name corresponds to the JSON schema field "name".
+	Name string `json:"name" yaml:"name" mapstructure:"name"`
+}
+
+type ExplicitBucketHistogramAggregation struct {
+	// Boundaries corresponds to the JSON schema field "boundaries".
+	Boundaries []float64 `json:"boundaries,omitempty" yaml:"boundaries,omitempty" mapstructure:"boundaries,omitempty"`
+
+	// RecordMinMax corresponds to the JSON schema field "record_min_max".
+	RecordMinMax *bool `json:"record_min_max,omitempty" yaml:"record_min_max,omitempty" mapstructure:"record_min_max,omitempty"`
+}
+
+type ExporterDefaultHistogramAggregation string
+
+const ExporterDefaultHistogramAggregationBase2ExponentialBucketHistogram ExporterDefaultHistogramAggregation = "base2_exponential_bucket_histogram"
+const ExporterDefaultHistogramAggregationExplicitBucketHistogram ExporterDefaultHistogramAggregation = "explicit_bucket_histogram"
+
+type ExporterTemporalityPreference string
+
+const ExporterTemporalityPreferenceCumulative ExporterTemporalityPreference = "cumulative"
+const ExporterTemporalityPreferenceDelta ExporterTemporalityPreference = "delta"
+const ExporterTemporalityPreferenceLowMemory ExporterTemporalityPreference = "low_memory"
+
+type IncludeExclude struct {
+	// Excluded corresponds to the JSON schema field "excluded".
+	Excluded []string `json:"excluded,omitempty" yaml:"excluded,omitempty" mapstructure:"excluded,omitempty"`
+
+	// Included corresponds to the JSON schema field "included".
+	Included []string `json:"included,omitempty" yaml:"included,omitempty" mapstructure:"included,omitempty"`
+}
+
+type InstrumentType string
+
+const InstrumentTypeCounter InstrumentType = "counter"
+const InstrumentTypeGauge InstrumentType = "gauge"
+const InstrumentTypeHistogram InstrumentType = "histogram"
+const InstrumentTypeObservableCounter InstrumentType = "observable_counter"
+const InstrumentTypeObservableGauge InstrumentType = "observable_gauge"
+const InstrumentTypeObservableUpDownCounter InstrumentType = "observable_up_down_counter"
+const InstrumentTypeUpDownCounter InstrumentType = "up_down_counter"
+
+type InstrumentationJson struct {
+	// Cpp corresponds to the JSON schema field "cpp".
+	Cpp ExperimentalLanguageSpecificInstrumentation `json:"cpp,omitempty" yaml:"cpp,omitempty" mapstructure:"cpp,omitempty"`
+
+	// Dotnet corresponds to the JSON schema field "dotnet".
+ Dotnet ExperimentalLanguageSpecificInstrumentation `json:"dotnet,omitempty" yaml:"dotnet,omitempty" mapstructure:"dotnet,omitempty"` + + // Erlang corresponds to the JSON schema field "erlang". + Erlang ExperimentalLanguageSpecificInstrumentation `json:"erlang,omitempty" yaml:"erlang,omitempty" mapstructure:"erlang,omitempty"` + + // General corresponds to the JSON schema field "general". + General *ExperimentalGeneralInstrumentation `json:"general,omitempty" yaml:"general,omitempty" mapstructure:"general,omitempty"` + + // Go corresponds to the JSON schema field "go". + Go ExperimentalLanguageSpecificInstrumentation `json:"go,omitempty" yaml:"go,omitempty" mapstructure:"go,omitempty"` + + // Java corresponds to the JSON schema field "java". + Java ExperimentalLanguageSpecificInstrumentation `json:"java,omitempty" yaml:"java,omitempty" mapstructure:"java,omitempty"` + + // Js corresponds to the JSON schema field "js". + Js ExperimentalLanguageSpecificInstrumentation `json:"js,omitempty" yaml:"js,omitempty" mapstructure:"js,omitempty"` + + // Php corresponds to the JSON schema field "php". + Php ExperimentalLanguageSpecificInstrumentation `json:"php,omitempty" yaml:"php,omitempty" mapstructure:"php,omitempty"` + + // Python corresponds to the JSON schema field "python". + Python ExperimentalLanguageSpecificInstrumentation `json:"python,omitempty" yaml:"python,omitempty" mapstructure:"python,omitempty"` + + // Ruby corresponds to the JSON schema field "ruby". + Ruby ExperimentalLanguageSpecificInstrumentation `json:"ruby,omitempty" yaml:"ruby,omitempty" mapstructure:"ruby,omitempty"` + + // Rust corresponds to the JSON schema field "rust". + Rust ExperimentalLanguageSpecificInstrumentation `json:"rust,omitempty" yaml:"rust,omitempty" mapstructure:"rust,omitempty"` + + // Swift corresponds to the JSON schema field "swift". + Swift ExperimentalLanguageSpecificInstrumentation `json:"swift,omitempty" yaml:"swift,omitempty" mapstructure:"swift,omitempty"` +} + +type JaegerPropagator map[string]interface{} + +type JaegerRemoteSampler struct { + // Endpoint corresponds to the JSON schema field "endpoint". + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // InitialSampler corresponds to the JSON schema field "initial_sampler". + InitialSampler *Sampler `json:"initial_sampler,omitempty" yaml:"initial_sampler,omitempty" mapstructure:"initial_sampler,omitempty"` + + // Interval corresponds to the JSON schema field "interval". + Interval *int `json:"interval,omitempty" yaml:"interval,omitempty" mapstructure:"interval,omitempty"` +} + +type LastValueAggregation map[string]interface{} + +type LogRecordExporter struct { + // Console corresponds to the JSON schema field "console". + Console ConsoleExporter `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` + + // OTLPFileDevelopment corresponds to the JSON schema field + // "otlp_file/development". + OTLPFileDevelopment *ExperimentalOTLPFileExporter `json:"otlp_file/development,omitempty" yaml:"otlp_file/development,omitempty" mapstructure:"otlp_file/development,omitempty"` + + // OTLPGrpc corresponds to the JSON schema field "otlp_grpc". + OTLPGrpc *OTLPGrpcExporter `json:"otlp_grpc,omitempty" yaml:"otlp_grpc,omitempty" mapstructure:"otlp_grpc,omitempty"` + + // OTLPHttp corresponds to the JSON schema field "otlp_http". 
+ OTLPHttp *OTLPHttpExporter `json:"otlp_http,omitempty" yaml:"otlp_http,omitempty" mapstructure:"otlp_http,omitempty"` + + AdditionalProperties interface{} `mapstructure:",remain"` +} + +type LogRecordLimits struct { + // AttributeCountLimit corresponds to the JSON schema field + // "attribute_count_limit". + AttributeCountLimit *int `json:"attribute_count_limit,omitempty" yaml:"attribute_count_limit,omitempty" mapstructure:"attribute_count_limit,omitempty"` + + // AttributeValueLengthLimit corresponds to the JSON schema field + // "attribute_value_length_limit". + AttributeValueLengthLimit *int `json:"attribute_value_length_limit,omitempty" yaml:"attribute_value_length_limit,omitempty" mapstructure:"attribute_value_length_limit,omitempty"` +} + +type LogRecordProcessor struct { + // Batch corresponds to the JSON schema field "batch". + Batch *BatchLogRecordProcessor `json:"batch,omitempty" yaml:"batch,omitempty" mapstructure:"batch,omitempty"` + + // Simple corresponds to the JSON schema field "simple". + Simple *SimpleLogRecordProcessor `json:"simple,omitempty" yaml:"simple,omitempty" mapstructure:"simple,omitempty"` + + AdditionalProperties interface{} `mapstructure:",remain"` +} + +type LoggerProviderJson struct { + // Limits corresponds to the JSON schema field "limits". + Limits *LogRecordLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + + // LoggerConfiguratorDevelopment corresponds to the JSON schema field + // "logger_configurator/development". + LoggerConfiguratorDevelopment *ExperimentalLoggerConfigurator `json:"logger_configurator/development,omitempty" yaml:"logger_configurator/development,omitempty" mapstructure:"logger_configurator/development,omitempty"` + + // Processors corresponds to the JSON schema field "processors". + Processors []LogRecordProcessor `json:"processors" yaml:"processors" mapstructure:"processors"` +} + +type MeterProviderJson struct { + // ExemplarFilter corresponds to the JSON schema field "exemplar_filter". + ExemplarFilter *ExemplarFilter `json:"exemplar_filter,omitempty" yaml:"exemplar_filter,omitempty" mapstructure:"exemplar_filter,omitempty"` + + // MeterConfiguratorDevelopment corresponds to the JSON schema field + // "meter_configurator/development". + MeterConfiguratorDevelopment *ExperimentalMeterConfigurator `json:"meter_configurator/development,omitempty" yaml:"meter_configurator/development,omitempty" mapstructure:"meter_configurator/development,omitempty"` + + // Readers corresponds to the JSON schema field "readers". + Readers []MetricReader `json:"readers" yaml:"readers" mapstructure:"readers"` + + // Views corresponds to the JSON schema field "views". + Views []View `json:"views,omitempty" yaml:"views,omitempty" mapstructure:"views,omitempty"` +} + +type MetricProducer struct { + // Opencensus corresponds to the JSON schema field "opencensus". + Opencensus OpenCensusMetricProducer `json:"opencensus,omitempty" yaml:"opencensus,omitempty" mapstructure:"opencensus,omitempty"` + + AdditionalProperties interface{} `mapstructure:",remain"` +} + +type MetricReader struct { + // Periodic corresponds to the JSON schema field "periodic". + Periodic *PeriodicMetricReader `json:"periodic,omitempty" yaml:"periodic,omitempty" mapstructure:"periodic,omitempty"` + + // Pull corresponds to the JSON schema field "pull". + Pull *PullMetricReader `json:"pull,omitempty" yaml:"pull,omitempty" mapstructure:"pull,omitempty"` +} + +type NameStringValuePair struct { + // Name corresponds to the JSON schema field "name". 
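+	// For example, a header name such as "api-key" (illustrative value).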
+ Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Value corresponds to the JSON schema field "value". + Value *string `json:"value" yaml:"value" mapstructure:"value"` +} + +type OTLPGrpcExporter struct { + // CertificateFile corresponds to the JSON schema field "certificate_file". + CertificateFile *string `json:"certificate_file,omitempty" yaml:"certificate_file,omitempty" mapstructure:"certificate_file,omitempty"` + + // ClientCertificateFile corresponds to the JSON schema field + // "client_certificate_file". + ClientCertificateFile *string `json:"client_certificate_file,omitempty" yaml:"client_certificate_file,omitempty" mapstructure:"client_certificate_file,omitempty"` + + // ClientKeyFile corresponds to the JSON schema field "client_key_file". + ClientKeyFile *string `json:"client_key_file,omitempty" yaml:"client_key_file,omitempty" mapstructure:"client_key_file,omitempty"` + + // Compression corresponds to the JSON schema field "compression". + Compression *string `json:"compression,omitempty" yaml:"compression,omitempty" mapstructure:"compression,omitempty"` + + // Endpoint corresponds to the JSON schema field "endpoint". + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // Headers corresponds to the JSON schema field "headers". + Headers []NameStringValuePair `json:"headers,omitempty" yaml:"headers,omitempty" mapstructure:"headers,omitempty"` + + // HeadersList corresponds to the JSON schema field "headers_list". + HeadersList *string `json:"headers_list,omitempty" yaml:"headers_list,omitempty" mapstructure:"headers_list,omitempty"` + + // Insecure corresponds to the JSON schema field "insecure". + Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // Timeout corresponds to the JSON schema field "timeout". + Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` +} + +type OTLPGrpcMetricExporter struct { + // CertificateFile corresponds to the JSON schema field "certificate_file". + CertificateFile *string `json:"certificate_file,omitempty" yaml:"certificate_file,omitempty" mapstructure:"certificate_file,omitempty"` + + // ClientCertificateFile corresponds to the JSON schema field + // "client_certificate_file". + ClientCertificateFile *string `json:"client_certificate_file,omitempty" yaml:"client_certificate_file,omitempty" mapstructure:"client_certificate_file,omitempty"` + + // ClientKeyFile corresponds to the JSON schema field "client_key_file". + ClientKeyFile *string `json:"client_key_file,omitempty" yaml:"client_key_file,omitempty" mapstructure:"client_key_file,omitempty"` + + // Compression corresponds to the JSON schema field "compression". + Compression *string `json:"compression,omitempty" yaml:"compression,omitempty" mapstructure:"compression,omitempty"` + + // DefaultHistogramAggregation corresponds to the JSON schema field + // "default_histogram_aggregation". + DefaultHistogramAggregation *ExporterDefaultHistogramAggregation `json:"default_histogram_aggregation,omitempty" yaml:"default_histogram_aggregation,omitempty" mapstructure:"default_histogram_aggregation,omitempty"` + + // Endpoint corresponds to the JSON schema field "endpoint". + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // Headers corresponds to the JSON schema field "headers". 
+ Headers []NameStringValuePair `json:"headers,omitempty" yaml:"headers,omitempty" mapstructure:"headers,omitempty"` + + // HeadersList corresponds to the JSON schema field "headers_list". + HeadersList *string `json:"headers_list,omitempty" yaml:"headers_list,omitempty" mapstructure:"headers_list,omitempty"` + + // Insecure corresponds to the JSON schema field "insecure". + Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // TemporalityPreference corresponds to the JSON schema field + // "temporality_preference". + TemporalityPreference *ExporterTemporalityPreference `json:"temporality_preference,omitempty" yaml:"temporality_preference,omitempty" mapstructure:"temporality_preference,omitempty"` + + // Timeout corresponds to the JSON schema field "timeout". + Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` +} + +type OTLPHttpEncoding string + +const OTLPHttpEncodingJson OTLPHttpEncoding = "json" +const OTLPHttpEncodingProtobuf OTLPHttpEncoding = "protobuf" + +type OTLPHttpExporter struct { + // CertificateFile corresponds to the JSON schema field "certificate_file". + CertificateFile *string `json:"certificate_file,omitempty" yaml:"certificate_file,omitempty" mapstructure:"certificate_file,omitempty"` + + // ClientCertificateFile corresponds to the JSON schema field + // "client_certificate_file". + ClientCertificateFile *string `json:"client_certificate_file,omitempty" yaml:"client_certificate_file,omitempty" mapstructure:"client_certificate_file,omitempty"` + + // ClientKeyFile corresponds to the JSON schema field "client_key_file". + ClientKeyFile *string `json:"client_key_file,omitempty" yaml:"client_key_file,omitempty" mapstructure:"client_key_file,omitempty"` + + // Compression corresponds to the JSON schema field "compression". + Compression *string `json:"compression,omitempty" yaml:"compression,omitempty" mapstructure:"compression,omitempty"` + + // Encoding corresponds to the JSON schema field "encoding". + Encoding *OTLPHttpEncoding `json:"encoding,omitempty" yaml:"encoding,omitempty" mapstructure:"encoding,omitempty"` + + // Endpoint corresponds to the JSON schema field "endpoint". + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // Headers corresponds to the JSON schema field "headers". + Headers []NameStringValuePair `json:"headers,omitempty" yaml:"headers,omitempty" mapstructure:"headers,omitempty"` + + // HeadersList corresponds to the JSON schema field "headers_list". + HeadersList *string `json:"headers_list,omitempty" yaml:"headers_list,omitempty" mapstructure:"headers_list,omitempty"` + + // Timeout corresponds to the JSON schema field "timeout". + Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` +} + +type OTLPHttpMetricExporter struct { + // CertificateFile corresponds to the JSON schema field "certificate_file". + CertificateFile *string `json:"certificate_file,omitempty" yaml:"certificate_file,omitempty" mapstructure:"certificate_file,omitempty"` + + // ClientCertificateFile corresponds to the JSON schema field + // "client_certificate_file". + ClientCertificateFile *string `json:"client_certificate_file,omitempty" yaml:"client_certificate_file,omitempty" mapstructure:"client_certificate_file,omitempty"` + + // ClientKeyFile corresponds to the JSON schema field "client_key_file". 
+ ClientKeyFile *string `json:"client_key_file,omitempty" yaml:"client_key_file,omitempty" mapstructure:"client_key_file,omitempty"` + + // Compression corresponds to the JSON schema field "compression". + Compression *string `json:"compression,omitempty" yaml:"compression,omitempty" mapstructure:"compression,omitempty"` + + // DefaultHistogramAggregation corresponds to the JSON schema field + // "default_histogram_aggregation". + DefaultHistogramAggregation *ExporterDefaultHistogramAggregation `json:"default_histogram_aggregation,omitempty" yaml:"default_histogram_aggregation,omitempty" mapstructure:"default_histogram_aggregation,omitempty"` + + // Encoding corresponds to the JSON schema field "encoding". + Encoding *OTLPHttpEncoding `json:"encoding,omitempty" yaml:"encoding,omitempty" mapstructure:"encoding,omitempty"` + + // Endpoint corresponds to the JSON schema field "endpoint". + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // Headers corresponds to the JSON schema field "headers". + Headers []NameStringValuePair `json:"headers,omitempty" yaml:"headers,omitempty" mapstructure:"headers,omitempty"` + + // HeadersList corresponds to the JSON schema field "headers_list". + HeadersList *string `json:"headers_list,omitempty" yaml:"headers_list,omitempty" mapstructure:"headers_list,omitempty"` + + // TemporalityPreference corresponds to the JSON schema field + // "temporality_preference". + TemporalityPreference *ExporterTemporalityPreference `json:"temporality_preference,omitempty" yaml:"temporality_preference,omitempty" mapstructure:"temporality_preference,omitempty"` + + // Timeout corresponds to the JSON schema field "timeout". + Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` +} + +type OpenCensusMetricProducer map[string]interface{} + +type OpenTelemetryConfiguration struct { + // AttributeLimits corresponds to the JSON schema field "attribute_limits". + AttributeLimits *AttributeLimits `json:"attribute_limits,omitempty" yaml:"attribute_limits,omitempty" mapstructure:"attribute_limits,omitempty"` + + // Disabled corresponds to the JSON schema field "disabled". + Disabled *bool `json:"disabled,omitempty" yaml:"disabled,omitempty" mapstructure:"disabled,omitempty"` + + // FileFormat corresponds to the JSON schema field "file_format". + FileFormat string `json:"file_format" yaml:"file_format" mapstructure:"file_format"` + + // InstrumentationDevelopment corresponds to the JSON schema field + // "instrumentation/development". + InstrumentationDevelopment OpenTelemetryConfigurationInstrumentationDevelopment `json:"instrumentation/development,omitempty" yaml:"instrumentation/development,omitempty" mapstructure:"instrumentation/development,omitempty"` + + // LogLevel corresponds to the JSON schema field "log_level". + LogLevel *string `json:"log_level,omitempty" yaml:"log_level,omitempty" mapstructure:"log_level,omitempty"` + + // LoggerProvider corresponds to the JSON schema field "logger_provider". + LoggerProvider OpenTelemetryConfigurationLoggerProvider `json:"logger_provider,omitempty" yaml:"logger_provider,omitempty" mapstructure:"logger_provider,omitempty"` + + // MeterProvider corresponds to the JSON schema field "meter_provider". + MeterProvider OpenTelemetryConfigurationMeterProvider `json:"meter_provider,omitempty" yaml:"meter_provider,omitempty" mapstructure:"meter_provider,omitempty"` + + // Propagator corresponds to the JSON schema field "propagator". 
+ Propagator OpenTelemetryConfigurationPropagator `json:"propagator,omitempty" yaml:"propagator,omitempty" mapstructure:"propagator,omitempty"` + + // Resource corresponds to the JSON schema field "resource". + Resource OpenTelemetryConfigurationResource `json:"resource,omitempty" yaml:"resource,omitempty" mapstructure:"resource,omitempty"` + + // TracerProvider corresponds to the JSON schema field "tracer_provider". + TracerProvider OpenTelemetryConfigurationTracerProvider `json:"tracer_provider,omitempty" yaml:"tracer_provider,omitempty" mapstructure:"tracer_provider,omitempty"` + + AdditionalProperties interface{} `mapstructure:",remain"` +} + +type OpenTelemetryConfigurationInstrumentationDevelopment interface{} + +type OpenTelemetryConfigurationLoggerProvider interface{} + +type OpenTelemetryConfigurationMeterProvider interface{} + +type OpenTelemetryConfigurationPropagator interface{} + +type OpenTelemetryConfigurationResource interface{} + +type OpenTelemetryConfigurationTracerProvider interface{} + +type OpenTracingPropagator map[string]interface{} + +type ParentBasedSampler struct { + // LocalParentNotSampled corresponds to the JSON schema field + // "local_parent_not_sampled". + LocalParentNotSampled *Sampler `json:"local_parent_not_sampled,omitempty" yaml:"local_parent_not_sampled,omitempty" mapstructure:"local_parent_not_sampled,omitempty"` + + // LocalParentSampled corresponds to the JSON schema field "local_parent_sampled". + LocalParentSampled *Sampler `json:"local_parent_sampled,omitempty" yaml:"local_parent_sampled,omitempty" mapstructure:"local_parent_sampled,omitempty"` + + // RemoteParentNotSampled corresponds to the JSON schema field + // "remote_parent_not_sampled". + RemoteParentNotSampled *Sampler `json:"remote_parent_not_sampled,omitempty" yaml:"remote_parent_not_sampled,omitempty" mapstructure:"remote_parent_not_sampled,omitempty"` + + // RemoteParentSampled corresponds to the JSON schema field + // "remote_parent_sampled". + RemoteParentSampled *Sampler `json:"remote_parent_sampled,omitempty" yaml:"remote_parent_sampled,omitempty" mapstructure:"remote_parent_sampled,omitempty"` + + // Root corresponds to the JSON schema field "root". + Root *Sampler `json:"root,omitempty" yaml:"root,omitempty" mapstructure:"root,omitempty"` +} + +type PeriodicMetricReader struct { + // CardinalityLimits corresponds to the JSON schema field "cardinality_limits". + CardinalityLimits *CardinalityLimits `json:"cardinality_limits,omitempty" yaml:"cardinality_limits,omitempty" mapstructure:"cardinality_limits,omitempty"` + + // Exporter corresponds to the JSON schema field "exporter". + Exporter PushMetricExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` + + // Interval corresponds to the JSON schema field "interval". + Interval *int `json:"interval,omitempty" yaml:"interval,omitempty" mapstructure:"interval,omitempty"` + + // Producers corresponds to the JSON schema field "producers". + Producers []MetricProducer `json:"producers,omitempty" yaml:"producers,omitempty" mapstructure:"producers,omitempty"` + + // Timeout corresponds to the JSON schema field "timeout". + Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` +} + +type PropagatorJson struct { + // Composite corresponds to the JSON schema field "composite". + Composite []TextMapPropagator `json:"composite,omitempty" yaml:"composite,omitempty" mapstructure:"composite,omitempty"` + + // CompositeList corresponds to the JSON schema field "composite_list". 
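+	// A comma-separated list of propagator names, e.g.
+	// "tracecontext,baggage".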
+ CompositeList *string `json:"composite_list,omitempty" yaml:"composite_list,omitempty" mapstructure:"composite_list,omitempty"` +} + +type PullMetricExporter struct { + // PrometheusDevelopment corresponds to the JSON schema field + // "prometheus/development". + PrometheusDevelopment *ExperimentalPrometheusMetricExporter `json:"prometheus/development,omitempty" yaml:"prometheus/development,omitempty" mapstructure:"prometheus/development,omitempty"` + + AdditionalProperties interface{} `mapstructure:",remain"` +} + +type PullMetricReader struct { + // CardinalityLimits corresponds to the JSON schema field "cardinality_limits". + CardinalityLimits *CardinalityLimits `json:"cardinality_limits,omitempty" yaml:"cardinality_limits,omitempty" mapstructure:"cardinality_limits,omitempty"` + + // Exporter corresponds to the JSON schema field "exporter". + Exporter PullMetricExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` + + // Producers corresponds to the JSON schema field "producers". + Producers []MetricProducer `json:"producers,omitempty" yaml:"producers,omitempty" mapstructure:"producers,omitempty"` +} + +type PushMetricExporter struct { + // Console corresponds to the JSON schema field "console". + Console ConsoleExporter `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` + + // OTLPFileDevelopment corresponds to the JSON schema field + // "otlp_file/development". + OTLPFileDevelopment *ExperimentalOTLPFileMetricExporter `json:"otlp_file/development,omitempty" yaml:"otlp_file/development,omitempty" mapstructure:"otlp_file/development,omitempty"` + + // OTLPGrpc corresponds to the JSON schema field "otlp_grpc". + OTLPGrpc *OTLPGrpcMetricExporter `json:"otlp_grpc,omitempty" yaml:"otlp_grpc,omitempty" mapstructure:"otlp_grpc,omitempty"` + + // OTLPHttp corresponds to the JSON schema field "otlp_http". + OTLPHttp *OTLPHttpMetricExporter `json:"otlp_http,omitempty" yaml:"otlp_http,omitempty" mapstructure:"otlp_http,omitempty"` + + AdditionalProperties interface{} `mapstructure:",remain"` +} + +type ResourceJson struct { + // Attributes corresponds to the JSON schema field "attributes". + Attributes []AttributeNameValue `json:"attributes,omitempty" yaml:"attributes,omitempty" mapstructure:"attributes,omitempty"` + + // AttributesList corresponds to the JSON schema field "attributes_list". + AttributesList *string `json:"attributes_list,omitempty" yaml:"attributes_list,omitempty" mapstructure:"attributes_list,omitempty"` + + // DetectionDevelopment corresponds to the JSON schema field + // "detection/development". + DetectionDevelopment *ExperimentalResourceDetection `json:"detection/development,omitempty" yaml:"detection/development,omitempty" mapstructure:"detection/development,omitempty"` + + // SchemaUrl corresponds to the JSON schema field "schema_url". + SchemaUrl *string `json:"schema_url,omitempty" yaml:"schema_url,omitempty" mapstructure:"schema_url,omitempty"` +} + +type Sampler struct { + // AlwaysOff corresponds to the JSON schema field "always_off". + AlwaysOff AlwaysOffSampler `json:"always_off,omitempty" yaml:"always_off,omitempty" mapstructure:"always_off,omitempty"` + + // AlwaysOn corresponds to the JSON schema field "always_on". + AlwaysOn AlwaysOnSampler `json:"always_on,omitempty" yaml:"always_on,omitempty" mapstructure:"always_on,omitempty"` + + // JaegerRemote corresponds to the JSON schema field "jaeger_remote". 
+ JaegerRemote *JaegerRemoteSampler `json:"jaeger_remote,omitempty" yaml:"jaeger_remote,omitempty" mapstructure:"jaeger_remote,omitempty"` + + // ParentBased corresponds to the JSON schema field "parent_based". + ParentBased *ParentBasedSampler `json:"parent_based,omitempty" yaml:"parent_based,omitempty" mapstructure:"parent_based,omitempty"` + + // TraceIDRatioBased corresponds to the JSON schema field "trace_id_ratio_based". + TraceIDRatioBased *TraceIDRatioBasedSampler `json:"trace_id_ratio_based,omitempty" yaml:"trace_id_ratio_based,omitempty" mapstructure:"trace_id_ratio_based,omitempty"` + + AdditionalProperties interface{} `mapstructure:",remain"` +} + +type SimpleLogRecordProcessor struct { + // Exporter corresponds to the JSON schema field "exporter". + Exporter LogRecordExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` +} + +type SimpleSpanProcessor struct { + // Exporter corresponds to the JSON schema field "exporter". + Exporter SpanExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` +} + +type SpanExporter struct { + // Console corresponds to the JSON schema field "console". + Console ConsoleExporter `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` + + // OTLPFileDevelopment corresponds to the JSON schema field + // "otlp_file/development". + OTLPFileDevelopment *ExperimentalOTLPFileExporter `json:"otlp_file/development,omitempty" yaml:"otlp_file/development,omitempty" mapstructure:"otlp_file/development,omitempty"` + + // OTLPGrpc corresponds to the JSON schema field "otlp_grpc". + OTLPGrpc *OTLPGrpcExporter `json:"otlp_grpc,omitempty" yaml:"otlp_grpc,omitempty" mapstructure:"otlp_grpc,omitempty"` + + // OTLPHttp corresponds to the JSON schema field "otlp_http". + OTLPHttp *OTLPHttpExporter `json:"otlp_http,omitempty" yaml:"otlp_http,omitempty" mapstructure:"otlp_http,omitempty"` + + // Zipkin corresponds to the JSON schema field "zipkin". + Zipkin *ZipkinSpanExporter `json:"zipkin,omitempty" yaml:"zipkin,omitempty" mapstructure:"zipkin,omitempty"` + + AdditionalProperties interface{} `mapstructure:",remain"` +} + +type SpanLimits struct { + // AttributeCountLimit corresponds to the JSON schema field + // "attribute_count_limit". + AttributeCountLimit *int `json:"attribute_count_limit,omitempty" yaml:"attribute_count_limit,omitempty" mapstructure:"attribute_count_limit,omitempty"` + + // AttributeValueLengthLimit corresponds to the JSON schema field + // "attribute_value_length_limit". + AttributeValueLengthLimit *int `json:"attribute_value_length_limit,omitempty" yaml:"attribute_value_length_limit,omitempty" mapstructure:"attribute_value_length_limit,omitempty"` + + // EventAttributeCountLimit corresponds to the JSON schema field + // "event_attribute_count_limit". + EventAttributeCountLimit *int `json:"event_attribute_count_limit,omitempty" yaml:"event_attribute_count_limit,omitempty" mapstructure:"event_attribute_count_limit,omitempty"` + + // EventCountLimit corresponds to the JSON schema field "event_count_limit". + EventCountLimit *int `json:"event_count_limit,omitempty" yaml:"event_count_limit,omitempty" mapstructure:"event_count_limit,omitempty"` + + // LinkAttributeCountLimit corresponds to the JSON schema field + // "link_attribute_count_limit". + LinkAttributeCountLimit *int `json:"link_attribute_count_limit,omitempty" yaml:"link_attribute_count_limit,omitempty" mapstructure:"link_attribute_count_limit,omitempty"` + + // LinkCountLimit corresponds to the JSON schema field "link_count_limit". 
+ LinkCountLimit *int `json:"link_count_limit,omitempty" yaml:"link_count_limit,omitempty" mapstructure:"link_count_limit,omitempty"` +} + +type SpanProcessor struct { + // Batch corresponds to the JSON schema field "batch". + Batch *BatchSpanProcessor `json:"batch,omitempty" yaml:"batch,omitempty" mapstructure:"batch,omitempty"` + + // Simple corresponds to the JSON schema field "simple". + Simple *SimpleSpanProcessor `json:"simple,omitempty" yaml:"simple,omitempty" mapstructure:"simple,omitempty"` + + AdditionalProperties interface{} `mapstructure:",remain"` +} + +type SumAggregation map[string]interface{} + +type TextMapPropagator struct { + // B3 corresponds to the JSON schema field "b3". + B3 B3Propagator `json:"b3,omitempty" yaml:"b3,omitempty" mapstructure:"b3,omitempty"` + + // B3Multi corresponds to the JSON schema field "b3multi". + B3Multi B3MultiPropagator `json:"b3multi,omitempty" yaml:"b3multi,omitempty" mapstructure:"b3multi,omitempty"` + + // Baggage corresponds to the JSON schema field "baggage". + Baggage BaggagePropagator `json:"baggage,omitempty" yaml:"baggage,omitempty" mapstructure:"baggage,omitempty"` + + // Jaeger corresponds to the JSON schema field "jaeger". + Jaeger JaegerPropagator `json:"jaeger,omitempty" yaml:"jaeger,omitempty" mapstructure:"jaeger,omitempty"` + + // Ottrace corresponds to the JSON schema field "ottrace". + Ottrace OpenTracingPropagator `json:"ottrace,omitempty" yaml:"ottrace,omitempty" mapstructure:"ottrace,omitempty"` + + // Tracecontext corresponds to the JSON schema field "tracecontext". + Tracecontext TraceContextPropagator `json:"tracecontext,omitempty" yaml:"tracecontext,omitempty" mapstructure:"tracecontext,omitempty"` + + AdditionalProperties interface{} `mapstructure:",remain"` +} + +type TraceContextPropagator map[string]interface{} + +type TraceIDRatioBasedSampler struct { + // Ratio corresponds to the JSON schema field "ratio". + Ratio *float64 `json:"ratio,omitempty" yaml:"ratio,omitempty" mapstructure:"ratio,omitempty"` +} + +type TracerProviderJson struct { + // Limits corresponds to the JSON schema field "limits". + Limits *SpanLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + + // Processors corresponds to the JSON schema field "processors". + Processors []SpanProcessor `json:"processors" yaml:"processors" mapstructure:"processors"` + + // Sampler corresponds to the JSON schema field "sampler". + Sampler *Sampler `json:"sampler,omitempty" yaml:"sampler,omitempty" mapstructure:"sampler,omitempty"` + + // TracerConfiguratorDevelopment corresponds to the JSON schema field + // "tracer_configurator/development". + TracerConfiguratorDevelopment *ExperimentalTracerConfigurator `json:"tracer_configurator/development,omitempty" yaml:"tracer_configurator/development,omitempty" mapstructure:"tracer_configurator/development,omitempty"` +} + +type View struct { + // Selector corresponds to the JSON schema field "selector". + Selector *ViewSelector `json:"selector,omitempty" yaml:"selector,omitempty" mapstructure:"selector,omitempty"` + + // Stream corresponds to the JSON schema field "stream". + Stream *ViewStream `json:"stream,omitempty" yaml:"stream,omitempty" mapstructure:"stream,omitempty"` +} + +type ViewSelector struct { + // InstrumentName corresponds to the JSON schema field "instrument_name". 
+ InstrumentName *string `json:"instrument_name,omitempty" yaml:"instrument_name,omitempty" mapstructure:"instrument_name,omitempty"` + + // InstrumentType corresponds to the JSON schema field "instrument_type". + InstrumentType *InstrumentType `json:"instrument_type,omitempty" yaml:"instrument_type,omitempty" mapstructure:"instrument_type,omitempty"` + + // MeterName corresponds to the JSON schema field "meter_name". + MeterName *string `json:"meter_name,omitempty" yaml:"meter_name,omitempty" mapstructure:"meter_name,omitempty"` + + // MeterSchemaUrl corresponds to the JSON schema field "meter_schema_url". + MeterSchemaUrl *string `json:"meter_schema_url,omitempty" yaml:"meter_schema_url,omitempty" mapstructure:"meter_schema_url,omitempty"` + + // MeterVersion corresponds to the JSON schema field "meter_version". + MeterVersion *string `json:"meter_version,omitempty" yaml:"meter_version,omitempty" mapstructure:"meter_version,omitempty"` + + // Unit corresponds to the JSON schema field "unit". + Unit *string `json:"unit,omitempty" yaml:"unit,omitempty" mapstructure:"unit,omitempty"` +} + +type ViewStream struct { + // Aggregation corresponds to the JSON schema field "aggregation". + Aggregation *Aggregation `json:"aggregation,omitempty" yaml:"aggregation,omitempty" mapstructure:"aggregation,omitempty"` + + // AggregationCardinalityLimit corresponds to the JSON schema field + // "aggregation_cardinality_limit". + AggregationCardinalityLimit *int `json:"aggregation_cardinality_limit,omitempty" yaml:"aggregation_cardinality_limit,omitempty" mapstructure:"aggregation_cardinality_limit,omitempty"` + + // AttributeKeys corresponds to the JSON schema field "attribute_keys". + AttributeKeys *IncludeExclude `json:"attribute_keys,omitempty" yaml:"attribute_keys,omitempty" mapstructure:"attribute_keys,omitempty"` + + // Description corresponds to the JSON schema field "description". + Description *string `json:"description,omitempty" yaml:"description,omitempty" mapstructure:"description,omitempty"` + + // Name corresponds to the JSON schema field "name". + Name *string `json:"name,omitempty" yaml:"name,omitempty" mapstructure:"name,omitempty"` +} + +type ZipkinSpanExporter struct { + // Endpoint corresponds to the JSON schema field "endpoint". + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // Timeout corresponds to the JSON schema field "timeout". 
+	Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"`
+}
diff --git a/otelconf/v0.4.0/log.go b/otelconf/v0.4.0/log.go
new file mode 100644
index 00000000000..1b2849a59bb
--- /dev/null
+++ b/otelconf/v0.4.0/log.go
@@ -0,0 +1,241 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelconf // import "go.opentelemetry.io/contrib/otelconf/v0.4.0"
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/url"
+	"time"
+
+	"google.golang.org/grpc/credentials"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc"
+	"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp"
+	"go.opentelemetry.io/otel/exporters/stdout/stdoutlog"
+	"go.opentelemetry.io/otel/log"
+	"go.opentelemetry.io/otel/log/noop"
+	sdklog "go.opentelemetry.io/otel/sdk/log"
+	"go.opentelemetry.io/otel/sdk/resource"
+)
+
+func loggerProvider(cfg configOptions, res *resource.Resource) (log.LoggerProvider, shutdownFunc, error) {
+	provider, ok := cfg.opentelemetryConfig.LoggerProvider.(*LoggerProviderJson)
+	if provider == nil {
+		return noop.NewLoggerProvider(), noopShutdown, nil
+	}
+	if !ok {
+		return noop.NewLoggerProvider(), noopShutdown, errors.New("invalid logger provider")
+	}
+	opts := []sdklog.LoggerProviderOption{
+		sdklog.WithResource(res),
+	}
+	var errs []error
+	for _, processor := range provider.Processors {
+		sp, err := logProcessor(cfg.ctx, processor)
+		if err == nil {
+			opts = append(opts, sdklog.WithProcessor(sp))
+		} else {
+			errs = append(errs, err)
+		}
+	}
+
+	if len(errs) > 0 {
+		return noop.NewLoggerProvider(), noopShutdown, errors.Join(errs...)
+	}
+
+	lp := sdklog.NewLoggerProvider(opts...)
+	return lp, lp.Shutdown, nil
+}
+
+func logProcessor(ctx context.Context, processor LogRecordProcessor) (sdklog.Processor, error) {
+	if processor.Batch != nil && processor.Simple != nil {
+		return nil, errors.New("must not specify multiple log processor type")
+	}
+	if processor.Batch != nil {
+		exp, err := logExporter(ctx, processor.Batch.Exporter)
+		if err != nil {
+			return nil, err
+		}
+		return batchLogProcessor(processor.Batch, exp)
+	}
+	if processor.Simple != nil {
+		exp, err := logExporter(ctx, processor.Simple.Exporter)
+		if err != nil {
+			return nil, err
+		}
+		return sdklog.NewSimpleProcessor(exp), nil
+	}
+	return nil, errors.New("unsupported log processor type, must be one of simple or batch")
+}
+
+func logExporter(ctx context.Context, exporter LogRecordExporter) (sdklog.Exporter, error) {
+	exportersConfigured := 0
+	var exportFunc func() (sdklog.Exporter, error)
+
+	if exporter.Console != nil {
+		exportersConfigured++
+		exportFunc = func() (sdklog.Exporter, error) {
+			return stdoutlog.New(
+				stdoutlog.WithPrettyPrint(),
+			)
+		}
+	}
+
+	if exporter.OTLPHttp != nil {
+		exportersConfigured++
+		exportFunc = func() (sdklog.Exporter, error) {
+			return otlpHTTPLogExporter(ctx, exporter.OTLPHttp)
+		}
+	}
+	if exporter.OTLPGrpc != nil {
+		exportersConfigured++
+		exportFunc = func() (sdklog.Exporter, error) {
+			return otlpGRPCLogExporter(ctx, exporter.OTLPGrpc)
+		}
+	}
+
+	if exportersConfigured > 1 {
+		return nil, errors.New("must not specify multiple exporters")
+	}
+
+	if exportFunc != nil {
+		return exportFunc()
+	}
+
+	return nil, errors.New("no valid log exporter")
+}
+
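+// batchLogProcessor translates BatchLogRecordProcessor configuration into
+// SDK batch processor options. Duration-style values (export timeout,
+// schedule delay) are interpreted as milliseconds; negative values are
+// rejected with an error.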
+func batchLogProcessor(blp *BatchLogRecordProcessor, exp sdklog.Exporter) (*sdklog.BatchProcessor, error) {
+	var opts []sdklog.BatchProcessorOption
+	if blp.ExportTimeout != nil {
+		if *blp.ExportTimeout < 0 {
+			return nil, fmt.Errorf("invalid export timeout %d", *blp.ExportTimeout)
+		}
+		opts = append(opts, sdklog.WithExportTimeout(time.Millisecond*time.Duration(*blp.ExportTimeout)))
+	}
+	if blp.MaxExportBatchSize != nil {
+		if *blp.MaxExportBatchSize < 0 {
+			return nil, fmt.Errorf("invalid batch size %d", *blp.MaxExportBatchSize)
+		}
+		opts = append(opts, sdklog.WithExportMaxBatchSize(*blp.MaxExportBatchSize))
+	}
+	if blp.MaxQueueSize != nil {
+		if *blp.MaxQueueSize < 0 {
+			return nil, fmt.Errorf("invalid queue size %d", *blp.MaxQueueSize)
+		}
+		opts = append(opts, sdklog.WithMaxQueueSize(*blp.MaxQueueSize))
+	}
+
+	if blp.ScheduleDelay != nil {
+		if *blp.ScheduleDelay < 0 {
+			return nil, fmt.Errorf("invalid schedule delay %d", *blp.ScheduleDelay)
+		}
+		opts = append(opts, sdklog.WithExportInterval(time.Millisecond*time.Duration(*blp.ScheduleDelay)))
+	}
+
+	return sdklog.NewBatchProcessor(exp, opts...), nil
+}
+
+func otlpHTTPLogExporter(ctx context.Context, otlpConfig *OTLPHttpExporter) (sdklog.Exporter, error) {
+	var opts []otlploghttp.Option
+
+	if otlpConfig.Endpoint != nil {
+		u, err := url.ParseRequestURI(*otlpConfig.Endpoint)
+		if err != nil {
+			return nil, err
+		}
+		opts = append(opts, otlploghttp.WithEndpoint(u.Host))
+
+		if u.Scheme == "http" {
+			opts = append(opts, otlploghttp.WithInsecure())
+		}
+		if len(u.Path) > 0 {
+			opts = append(opts, otlploghttp.WithURLPath(u.Path))
+		}
+	}
+	if otlpConfig.Compression != nil {
+		switch *otlpConfig.Compression {
+		case compressionGzip:
+			opts = append(opts, otlploghttp.WithCompression(otlploghttp.GzipCompression))
+		case compressionNone:
+			opts = append(opts, otlploghttp.WithCompression(otlploghttp.NoCompression))
+		default:
+			return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression)
+		}
+	}
+	if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 {
+		opts = append(opts, otlploghttp.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout)))
+	}
+	headersConfig, err := createHeadersConfig(otlpConfig.Headers, otlpConfig.HeadersList)
+	if err != nil {
+		return nil, err
+	}
+	if len(headersConfig) > 0 {
+		opts = append(opts, otlploghttp.WithHeaders(headersConfig))
+	}
+
+	tlsConfig, err := createTLSConfig(otlpConfig.CertificateFile, otlpConfig.ClientCertificateFile, otlpConfig.ClientKeyFile)
+	if err != nil {
+		return nil, err
+	}
+	opts = append(opts, otlploghttp.WithTLSClientConfig(tlsConfig))
+
+	return otlploghttp.New(ctx, opts...)
+}
+
+func otlpGRPCLogExporter(ctx context.Context, otlpConfig *OTLPGrpcExporter) (sdklog.Exporter, error) {
+	var opts []otlploggrpc.Option
+
+	if otlpConfig.Endpoint != nil {
+		u, err := url.ParseRequestURI(*otlpConfig.Endpoint)
+		if err != nil {
+			return nil, err
+		}
+		// ParseRequestURI leaves the Host field empty when no
+		// scheme is specified (e.g. localhost:4317). This check is
+		// here to support the case where a user may not specify a
+		// scheme. The code makes a best effort here by using
+		// otlpConfig.Endpoint as-is in that case.
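+		// For example (illustrative values):
+		//   "https://collector.example.com:4317" parses with a non-empty
+		//   Host, so "collector.example.com:4317" is passed to
+		//   WithEndpoint; "localhost:4317" parses with an empty Host, so
+		//   the configured string is used unchanged.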
+		if u.Host != "" {
+			opts = append(opts, otlploggrpc.WithEndpoint(u.Host))
+		} else {
+			opts = append(opts, otlploggrpc.WithEndpoint(*otlpConfig.Endpoint))
+		}
+		if u.Scheme == "http" || (u.Scheme != "https" && otlpConfig.Insecure != nil && *otlpConfig.Insecure) {
+			opts = append(opts, otlploggrpc.WithInsecure())
+		}
+	}
+	if otlpConfig.Compression != nil {
+		switch *otlpConfig.Compression {
+		case compressionGzip:
+			opts = append(opts, otlploggrpc.WithCompressor(*otlpConfig.Compression))
+		case compressionNone:
+			// none requires no options
+		default:
+			return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression)
+		}
+	}
+	if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 {
+		opts = append(opts, otlploggrpc.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout)))
+	}
+	headersConfig, err := createHeadersConfig(otlpConfig.Headers, otlpConfig.HeadersList)
+	if err != nil {
+		return nil, err
+	}
+	if len(headersConfig) > 0 {
+		opts = append(opts, otlploggrpc.WithHeaders(headersConfig))
+	}
+
+	if otlpConfig.CertificateFile != nil || otlpConfig.ClientCertificateFile != nil || otlpConfig.ClientKeyFile != nil {
+		tlsConfig, err := createTLSConfig(otlpConfig.CertificateFile, otlpConfig.ClientCertificateFile, otlpConfig.ClientKeyFile)
+		if err != nil {
+			return nil, err
+		}
+		opts = append(opts, otlploggrpc.WithTLSCredentials(credentials.NewTLS(tlsConfig)))
+	}
+
+	return otlploggrpc.New(ctx, opts...)
+}
diff --git a/otelconf/v0.4.0/log_test.go b/otelconf/v0.4.0/log_test.go
new file mode 100644
index 00000000000..c82a5780892
--- /dev/null
+++ b/otelconf/v0.4.0/log_test.go
@@ -0,0 +1,643 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelconf
+
+import (
+	"context"
+	"errors"
+	"path/filepath"
+	"reflect"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc"
+	"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp"
+	"go.opentelemetry.io/otel/exporters/stdout/stdoutlog"
+	"go.opentelemetry.io/otel/log"
+	"go.opentelemetry.io/otel/log/noop"
+	sdklog "go.opentelemetry.io/otel/sdk/log"
+	"go.opentelemetry.io/otel/sdk/resource"
+)
+
+func TestLoggerProvider(t *testing.T) {
+	for _, tt := range []struct {
+		name         string
+		cfg          configOptions
+		wantProvider log.LoggerProvider
+		wantErr      error
+	}{
+		{
+			name:         "no-logger-provider-configured",
+			wantProvider: noop.NewLoggerProvider(),
+		},
+		{
+			name: "error-in-config",
+			cfg: configOptions{
+				opentelemetryConfig: OpenTelemetryConfiguration{
+					LoggerProvider: &LoggerProviderJson{
+						Processors: []LogRecordProcessor{
+							{
+								Simple: &SimpleLogRecordProcessor{},
+								Batch:  &BatchLogRecordProcessor{},
+							},
+						},
+					},
+				},
+			},
+			wantProvider: noop.NewLoggerProvider(),
+			wantErr:      errors.Join(errors.New("must not specify multiple log processor type")),
+		},
+	} {
+		t.Run(tt.name, func(t *testing.T) {
+			mp, shutdown, err := loggerProvider(tt.cfg, resource.Default())
+			require.Equal(t, tt.wantProvider, mp)
+			assert.Equal(t, tt.wantErr, err)
+			require.NoError(t, shutdown(context.Background()))
+		})
+	}
+}
+
+func TestLogProcessor(t *testing.T) {
+	ctx := context.Background()
+
+	otlpHTTPExporter, err := otlploghttp.New(ctx)
+	require.NoError(t, err)
+
+	otlpGRPCExporter, err := otlploggrpc.New(ctx)
+	require.NoError(t, err)
+
+	consoleExporter, err := stdoutlog.New(
+		stdoutlog.WithPrettyPrint(),
+	)
+	require.NoError(t, err)
+
+	for _, tt := range []struct {
+		name          string
+		processor     LogRecordProcessor
+		args          any
+		wantErr       string
+		wantProcessor sdklog.Processor
+	}{
+		{
+			name:    "no processor",
+			wantErr: "unsupported log processor type, must be one of simple or batch",
+		},
+		{
+			name: "multiple processor types",
+			processor: LogRecordProcessor{
+				Batch: &BatchLogRecordProcessor{
+					Exporter: LogRecordExporter{},
+				},
+				Simple: &SimpleLogRecordProcessor{},
+			},
+			wantErr: "must not specify multiple log processor type",
+		},
+		{
+			name: "batch processor invalid batch size otlphttp exporter",
+			processor: LogRecordProcessor{
+				Batch: &BatchLogRecordProcessor{
+					MaxExportBatchSize: ptr(-1),
+					Exporter: LogRecordExporter{
+						OTLPHttp: &OTLPHttpExporter{},
+					},
+				},
+			},
+			wantErr: "invalid batch size -1",
+		},
+		{
+			name: "batch processor invalid export timeout otlphttp exporter",
+			processor: LogRecordProcessor{
+				Batch: &BatchLogRecordProcessor{
+					ExportTimeout: ptr(-2),
+					Exporter: LogRecordExporter{
+						OTLPHttp: &OTLPHttpExporter{},
+					},
+				},
+			},
+			wantErr: "invalid export timeout -2",
+		},
+		{
+			name: "batch processor invalid queue size otlphttp exporter",
+			processor: LogRecordProcessor{
+				Batch: &BatchLogRecordProcessor{
+					MaxQueueSize: ptr(-3),
+					Exporter: LogRecordExporter{
+						OTLPHttp: &OTLPHttpExporter{},
+					},
+				},
+			},
+			wantErr: "invalid queue size -3",
+		},
+		{
+			name: "batch processor invalid schedule delay otlphttp exporter",
+			processor: LogRecordProcessor{
+				Batch: &BatchLogRecordProcessor{
+					ScheduleDelay: ptr(-4),
+					Exporter: LogRecordExporter{
+						OTLPHttp: &OTLPHttpExporter{},
+					},
+				},
+			},
+			wantErr: "invalid schedule delay -4",
+		},
+		{
+			name: "batch processor invalid exporter",
+			processor: LogRecordProcessor{
+				Batch: &BatchLogRecordProcessor{
+					Exporter: LogRecordExporter{},
+				},
+			},
+			wantErr: "no valid log exporter",
+		},
+		{
+			name: "batch/console",
+			processor: LogRecordProcessor{
+				Batch: &BatchLogRecordProcessor{
+					MaxExportBatchSize: ptr(0),
+					ExportTimeout:      ptr(0),
+					MaxQueueSize:       ptr(0),
+					ScheduleDelay:      ptr(0),
+					Exporter: LogRecordExporter{
+						Console: map[string]any{},
+					},
+				},
+			},
+			wantProcessor: sdklog.NewBatchProcessor(consoleExporter),
+		},
+		{
+			name: "batch/otlp-grpc-exporter-no-endpoint",
+			processor: LogRecordProcessor{
+				Batch: &BatchLogRecordProcessor{
+					MaxExportBatchSize: ptr(0),
+					ExportTimeout:      ptr(0),
+					MaxQueueSize:       ptr(0),
+					ScheduleDelay:      ptr(0),
+					Exporter: LogRecordExporter{
+						OTLPGrpc: &OTLPGrpcExporter{
+							Compression: ptr("gzip"),
+							Timeout:     ptr(1000),
+							Headers: []NameStringValuePair{
+								{Name: "test", Value: ptr("test1")},
+							},
+						},
+					},
+				},
+			},
+			wantProcessor: sdklog.NewBatchProcessor(otlpGRPCExporter),
+		},
+		{
+			name: "batch/otlp-grpc-exporter",
+			processor: LogRecordProcessor{
+				Batch: &BatchLogRecordProcessor{
+					MaxExportBatchSize: ptr(0),
+					ExportTimeout:      ptr(0),
+					MaxQueueSize:       ptr(0),
+					ScheduleDelay:      ptr(0),
+					Exporter: LogRecordExporter{
+						OTLPGrpc: &OTLPGrpcExporter{
+							Endpoint:    ptr("http://localhost:4317"),
+							Compression: ptr("gzip"),
+							Timeout:     ptr(1000),
+							Headers: []NameStringValuePair{
+								{Name: "test", Value: ptr("test1")},
+							},
+						},
+					},
+				},
+			},
+			wantProcessor: sdklog.NewBatchProcessor(otlpGRPCExporter),
+		},
+		{
+			name: "batch/otlp-grpc-exporter-socket-endpoint",
+			processor: LogRecordProcessor{
+				Batch: &BatchLogRecordProcessor{
+					MaxExportBatchSize: ptr(0),
+					ExportTimeout:      ptr(0),
+					MaxQueueSize:       ptr(0),
+					ScheduleDelay:      ptr(0),
+					Exporter: LogRecordExporter{
+						OTLPGrpc: &OTLPGrpcExporter{
Endpoint: ptr("unix:collector.sock"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(otlpGRPCExporter), + }, + { + name: "batch/otlp-grpc-good-ca-certificate", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + Exporter: LogRecordExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + CertificateFile: ptr(filepath.Join("..", "testdata", "ca.crt")), + }, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(otlpGRPCExporter), + }, + { + name: "batch/otlp-grpc-bad-ca-certificate", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + Exporter: LogRecordExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + CertificateFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + }, + }, + }, + }, + wantErr: "could not create certificate authority chain from certificate", + }, + { + name: "batch/otlp-grpc-bad-headerslist", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + Exporter: LogRecordExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + HeadersList: ptr("==="), + }, + }, + }, + }, + wantErr: "invalid headers list: invalid key: \"\"", + }, + { + name: "batch/otlp-grpc-bad-client-certificate", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + Exporter: LogRecordExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + ClientCertificateFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + ClientKeyFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + }, + }, + }, + }, + wantErr: "could not use client certificate: tls: failed to find any PEM data in certificate input", + }, + { + name: "batch/otlp-grpc-exporter-no-scheme", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(otlpGRPCExporter), + }, + { + name: "batch/otlp-grpc-invalid-endpoint", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr(" "), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantErr: "parse \" \": invalid URI for request", + }, + { + name: "batch/otlp-grpc-invalid-compression", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("invalid"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantErr: 
"unsupported compression \"invalid\"", + }, + { + name: "batch/otlp-http-exporter", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("http://localhost:4318"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-good-ca-certificate", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + CertificateFile: ptr(filepath.Join("..", "testdata", "ca.crt")), + }, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-bad-ca-certificate", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + CertificateFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + }, + }, + }, + }, + wantErr: "could not create certificate authority chain from certificate", + }, + { + name: "batch/otlp-http-bad-client-certificate", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + ClientCertificateFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + ClientKeyFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + }, + }, + }, + }, + wantErr: "could not use client certificate: tls: failed to find any PEM data in certificate input", + }, + { + name: "batch/otlp-http-bad-headerslist", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + HeadersList: ptr("==="), + }, + }, + }, + }, + wantErr: "invalid headers list: invalid key: \"\"", + }, + { + name: "batch/otlp-http-exporter-with-path", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("http://localhost:4318/path/123"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-exporter-no-endpoint", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-exporter-no-scheme", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(0), + 
ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-invalid-endpoint", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr(" "), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantErr: "parse \" \": invalid URI for request", + }, + { + name: "batch/otlp-http-none-compression", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-invalid-compression", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("invalid"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantErr: "unsupported compression \"invalid\"", + }, + { + name: "simple/no-exporter", + processor: LogRecordProcessor{ + Simple: &SimpleLogRecordProcessor{ + Exporter: LogRecordExporter{}, + }, + }, + wantErr: "no valid log exporter", + }, + { + name: "simple/console", + processor: LogRecordProcessor{ + Simple: &SimpleLogRecordProcessor{ + Exporter: LogRecordExporter{ + Console: map[string]any{}, + }, + }, + }, + wantProcessor: sdklog.NewSimpleProcessor(consoleExporter), + }, + { + name: "simple/otlp-exporter", + processor: LogRecordProcessor{ + Simple: &SimpleLogRecordProcessor{ + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdklog.NewSimpleProcessor(otlpHTTPExporter), + }, + } { + t.Run(tt.name, func(t *testing.T) { + got, err := logProcessor(context.Background(), tt.processor) + if tt.wantErr != "" { + require.Error(t, err) + require.Equal(t, tt.wantErr, err.Error()) + } else { + require.NoError(t, err) + } + if tt.wantProcessor == nil { + require.Nil(t, got) + } else { + require.Equal(t, reflect.TypeOf(tt.wantProcessor), reflect.TypeOf(got)) + wantExporterType := reflect.Indirect(reflect.ValueOf(tt.wantProcessor)).FieldByName("exporter").Elem().Type() + gotExporterType := reflect.Indirect(reflect.ValueOf(got)).FieldByName("exporter").Elem().Type() + require.Equal(t, wantExporterType.String(), gotExporterType.String()) + } + }) + } +} diff --git a/otelconf/v0.4.0/metric.go b/otelconf/v0.4.0/metric.go new file mode 
100644 index 00000000000..c875f27e4a8 --- /dev/null +++ b/otelconf/v0.4.0/metric.go @@ -0,0 +1,588 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelconf // import "go.opentelemetry.io/contrib/otelconf/v0.3.0" + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "math" + "net" + "net/http" + "net/url" + "os" + "strconv" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "google.golang.org/grpc/credentials" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" + otelprom "go.opentelemetry.io/otel/exporters/prometheus" + "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/sdk/instrumentation" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/resource" +) + +var zeroScope instrumentation.Scope + +const instrumentKindUndefined = sdkmetric.InstrumentKind(0) + +func meterProvider(cfg configOptions, res *resource.Resource) (metric.MeterProvider, shutdownFunc, error) { + provider, ok := cfg.opentelemetryConfig.MeterProvider.(*MeterProviderJson) + if provider == nil { + return noop.NewMeterProvider(), noopShutdown, nil + } + if !ok { + return noop.NewMeterProvider(), noopShutdown, errors.New("invalid meter provider") + } + + opts := []sdkmetric.Option{ + sdkmetric.WithResource(res), + } + + var errs []error + for _, reader := range provider.Readers { + r, err := metricReader(cfg.ctx, reader) + if err == nil { + opts = append(opts, sdkmetric.WithReader(r)) + } else { + errs = append(errs, err) + } + } + for _, vw := range provider.Views { + v, err := view(vw) + if err == nil { + opts = append(opts, sdkmetric.WithView(v)) + } else { + errs = append(errs, err) + } + } + + if len(errs) > 0 { + return noop.NewMeterProvider(), noopShutdown, errors.Join(errs...) + } + + mp := sdkmetric.NewMeterProvider(opts...) + return mp, mp.Shutdown, nil +} + +func metricReader(ctx context.Context, r MetricReader) (sdkmetric.Reader, error) { + if r.Periodic != nil && r.Pull != nil { + return nil, errors.New("must not specify multiple metric reader type") + } + + if r.Periodic != nil { + var opts []sdkmetric.PeriodicReaderOption + if r.Periodic.Interval != nil { + opts = append(opts, sdkmetric.WithInterval(time.Duration(*r.Periodic.Interval)*time.Millisecond)) + } + + if r.Periodic.Timeout != nil { + opts = append(opts, sdkmetric.WithTimeout(time.Duration(*r.Periodic.Timeout)*time.Millisecond)) + } + return periodicExporter(ctx, r.Periodic.Exporter, opts...) 
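The periodic branch above is where the schema's integers become SDK options: `interval` and `timeout` are defined in milliseconds, hence the `time.Millisecond` scaling. A minimal sketch of the equivalent direct SDK wiring for a `periodic` reader with `interval: 30000` and `timeout: 5000` (the stdout exporter stands in for whatever `periodicExporter` would build):

```go
package main

import (
	"context"
	"time"

	"go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	// Stand-in for the exporter periodicExporter would construct.
	exp, err := stdoutmetric.New()
	if err != nil {
		panic(err)
	}
	// interval: 30000 / timeout: 5000 are milliseconds in the config model,
	// mirroring the time.Millisecond conversion in metricReader.
	reader := sdkmetric.NewPeriodicReader(exp,
		sdkmetric.WithInterval(30_000*time.Millisecond),
		sdkmetric.WithTimeout(5_000*time.Millisecond),
	)
	mp := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
	defer func() { _ = mp.Shutdown(context.Background()) }()
}
```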
+ } + + if r.Pull != nil { + return pullReader(ctx, r.Pull.Exporter) + } + return nil, errors.New("no valid metric reader") +} + +func pullReader(ctx context.Context, exporter PullMetricExporter) (sdkmetric.Reader, error) { + if exporter.PrometheusDevelopment != nil { + return prometheusReader(ctx, exporter.PrometheusDevelopment) + } + return nil, errors.New("no valid metric exporter") +} + +func periodicExporter(ctx context.Context, exporter PushMetricExporter, opts ...sdkmetric.PeriodicReaderOption) (sdkmetric.Reader, error) { + exportersConfigured := 0 + var exportFunc func() (sdkmetric.Reader, error) + + if exporter.Console != nil { + exportersConfigured++ + enc := json.NewEncoder(os.Stdout) + enc.SetIndent("", " ") + + exp, err := stdoutmetric.New( + stdoutmetric.WithEncoder(enc), + ) + if err != nil { + return nil, err + } + exportFunc = func() (sdkmetric.Reader, error) { + return sdkmetric.NewPeriodicReader(exp, opts...), nil + } + } + if exporter.OTLPHttp != nil { + exportersConfigured++ + exp, err := otlpHTTPMetricExporter(ctx, exporter.OTLPHttp) + if err != nil { + return nil, err + } + exportFunc = func() (sdkmetric.Reader, error) { + return sdkmetric.NewPeriodicReader(exp, opts...), nil + } + } + if exporter.OTLPGrpc != nil { + exportersConfigured++ + exp, err := otlpGRPCMetricExporter(ctx, exporter.OTLPGrpc) + if err != nil { + return nil, err + } + exportFunc = func() (sdkmetric.Reader, error) { + return sdkmetric.NewPeriodicReader(exp, opts...), nil + } + } + + if exportersConfigured > 1 { + return nil, errors.New("must not specify multiple exporters") + } + + if exportFunc != nil { + return exportFunc() + } + + return nil, errors.New("no valid metric exporter") +} + +func otlpHTTPMetricExporter(ctx context.Context, otlpConfig *OTLPHttpMetricExporter) (sdkmetric.Exporter, error) { + opts := []otlpmetrichttp.Option{} + + if otlpConfig.Endpoint != nil { + u, err := url.ParseRequestURI(*otlpConfig.Endpoint) + if err != nil { + return nil, err + } + opts = append(opts, otlpmetrichttp.WithEndpoint(u.Host)) + + if u.Scheme == "http" { + opts = append(opts, otlpmetrichttp.WithInsecure()) + } + if len(u.Path) > 0 { + opts = append(opts, otlpmetrichttp.WithURLPath(u.Path)) + } + } + if otlpConfig.Compression != nil { + switch *otlpConfig.Compression { + case compressionGzip: + opts = append(opts, otlpmetrichttp.WithCompression(otlpmetrichttp.GzipCompression)) + case compressionNone: + opts = append(opts, otlpmetrichttp.WithCompression(otlpmetrichttp.NoCompression)) + default: + return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) + } + } + if otlpConfig.Timeout != nil { + opts = append(opts, otlpmetrichttp.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) + } + headersConfig, err := createHeadersConfig(otlpConfig.Headers, otlpConfig.HeadersList) + if err != nil { + return nil, err + } + if len(headersConfig) > 0 { + opts = append(opts, otlpmetrichttp.WithHeaders(headersConfig)) + } + if otlpConfig.TemporalityPreference != nil { + switch *otlpConfig.TemporalityPreference { + case "delta": + opts = append(opts, otlpmetrichttp.WithTemporalitySelector(deltaTemporality)) + case "cumulative": + opts = append(opts, otlpmetrichttp.WithTemporalitySelector(cumulativeTemporality)) + case "low_memory": + opts = append(opts, otlpmetrichttp.WithTemporalitySelector(lowMemory)) + default: + return nil, fmt.Errorf("unsupported temporality preference %q", *otlpConfig.TemporalityPreference) + } + } + + tlsConfig, err := 
createTLSConfig(otlpConfig.CertificateFile, otlpConfig.ClientCertificateFile, otlpConfig.ClientKeyFile) + if err != nil { + return nil, err + } + opts = append(opts, otlpmetrichttp.WithTLSClientConfig(tlsConfig)) + + return otlpmetrichttp.New(ctx, opts...) +} + +func otlpGRPCMetricExporter(ctx context.Context, otlpConfig *OTLPGrpcMetricExporter) (sdkmetric.Exporter, error) { + var opts []otlpmetricgrpc.Option + + if otlpConfig.Endpoint != nil { + u, err := url.ParseRequestURI(*otlpConfig.Endpoint) + if err != nil { + return nil, err + } + // ParseRequestURI leaves the Host field empty when no + // scheme is specified (i.e. localhost:4317). This check is + // here to support the case where a user may not specify a + // scheme. The code does its best effort here by using + // otlpConfig.Endpoint as-is in that case + if u.Host != "" { + opts = append(opts, otlpmetricgrpc.WithEndpoint(u.Host)) + } else { + opts = append(opts, otlpmetricgrpc.WithEndpoint(*otlpConfig.Endpoint)) + } + if u.Scheme == "http" || (u.Scheme != "https" && otlpConfig.Insecure != nil && *otlpConfig.Insecure) { + opts = append(opts, otlpmetricgrpc.WithInsecure()) + } + } + + if otlpConfig.Compression != nil { + switch *otlpConfig.Compression { + case compressionGzip: + opts = append(opts, otlpmetricgrpc.WithCompressor(*otlpConfig.Compression)) + case compressionNone: + // none requires no options + default: + return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) + } + } + if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 { + opts = append(opts, otlpmetricgrpc.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) + } + headersConfig, err := createHeadersConfig(otlpConfig.Headers, otlpConfig.HeadersList) + if err != nil { + return nil, err + } + if len(headersConfig) > 0 { + opts = append(opts, otlpmetricgrpc.WithHeaders(headersConfig)) + } + if otlpConfig.TemporalityPreference != nil { + switch *otlpConfig.TemporalityPreference { + case "delta": + opts = append(opts, otlpmetricgrpc.WithTemporalitySelector(deltaTemporality)) + case "cumulative": + opts = append(opts, otlpmetricgrpc.WithTemporalitySelector(cumulativeTemporality)) + case "low_memory": + opts = append(opts, otlpmetricgrpc.WithTemporalitySelector(lowMemory)) + default: + return nil, fmt.Errorf("unsupported temporality preference %q", *otlpConfig.TemporalityPreference) + } + } + + if otlpConfig.CertificateFile != nil || otlpConfig.ClientCertificateFile != nil || otlpConfig.ClientKeyFile != nil { + tlsConfig, err := createTLSConfig(otlpConfig.CertificateFile, otlpConfig.ClientCertificateFile, otlpConfig.ClientKeyFile) + if err != nil { + return nil, err + } + opts = append(opts, otlpmetricgrpc.WithTLSCredentials(credentials.NewTLS(tlsConfig))) + } + + return otlpmetricgrpc.New(ctx, opts...) 
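A note on the endpoint handling in both OTLP metric exporters: `url.ParseRequestURI` succeeds on a scheme-less endpoint such as `localhost:4317`, but it parses `localhost` as the scheme and leaves `u.Host` empty, which is why the gRPC path falls back to the raw endpoint string. A standalone illustration of that quirk (the error text for `" "` is the one the tests below expect):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	for _, endpoint := range []string{"http://localhost:4317", "localhost:4317", " "} {
		u, err := url.ParseRequestURI(endpoint)
		if err != nil {
			// " " yields: parse " ": invalid URI for request
			fmt.Printf("%q -> %v\n", endpoint, err)
			continue
		}
		// "http://localhost:4317" -> scheme "http", host "localhost:4317"
		// "localhost:4317"        -> scheme "localhost", host "" (port taken as an opaque part)
		fmt.Printf("%q -> scheme=%q host=%q\n", endpoint, u.Scheme, u.Host)
	}
}
```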
+} + +func cumulativeTemporality(sdkmetric.InstrumentKind) metricdata.Temporality { + return metricdata.CumulativeTemporality +} + +func deltaTemporality(ik sdkmetric.InstrumentKind) metricdata.Temporality { + switch ik { + case sdkmetric.InstrumentKindCounter, sdkmetric.InstrumentKindHistogram, sdkmetric.InstrumentKindObservableCounter: + return metricdata.DeltaTemporality + default: + return metricdata.CumulativeTemporality + } +} + +func lowMemory(ik sdkmetric.InstrumentKind) metricdata.Temporality { + switch ik { + case sdkmetric.InstrumentKindCounter, sdkmetric.InstrumentKindHistogram: + return metricdata.DeltaTemporality + default: + return metricdata.CumulativeTemporality + } +} + +// newIncludeExcludeFilter returns a Filter that includes attributes +// in the include list and excludes attributes in the excludes list. +// It returns an error if an attribute is in both lists +// +// If IncludeExclude is empty a include-all filter is returned. +func newIncludeExcludeFilter(lists *IncludeExclude) (attribute.Filter, error) { + if lists == nil { + return func(kv attribute.KeyValue) bool { return true }, nil + } + + included := make(map[attribute.Key]struct{}) + for _, k := range lists.Included { + included[attribute.Key(k)] = struct{}{} + } + excluded := make(map[attribute.Key]struct{}) + for _, k := range lists.Excluded { + if _, ok := included[attribute.Key(k)]; ok { + return nil, fmt.Errorf("attribute cannot be in both include and exclude list: %s", k) + } + excluded[attribute.Key(k)] = struct{}{} + } + return func(kv attribute.KeyValue) bool { + // check if a value is excluded first + if _, ok := excluded[kv.Key]; ok { + return false + } + + if len(included) == 0 { + return true + } + + _, ok := included[kv.Key] + return ok + }, nil +} + +func prometheusReader(ctx context.Context, prometheusConfig *ExperimentalPrometheusMetricExporter) (sdkmetric.Reader, error) { + if prometheusConfig.Host == nil { + return nil, errors.New("host must be specified") + } + if prometheusConfig.Port == nil { + return nil, errors.New("port must be specified") + } + + opts, err := prometheusReaderOpts(prometheusConfig) + if err != nil { + return nil, err + } + + reg := prometheus.NewRegistry() + opts = append(opts, otelprom.WithRegisterer(reg)) + + reader, err := otelprom.New(opts...) + if err != nil { + return nil, fmt.Errorf("error creating otel prometheus exporter: %w", err) + } + + mux := http.NewServeMux() + mux.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg})) + server := http.Server{ + // Timeouts are necessary to make a server resilient to attacks. + // We use values from this example: https://blog.cloudflare.com/exposing-go-on-the-internet/#:~:text=There%20are%20three%20main%20timeouts + ReadTimeout: 5 * time.Second, + WriteTimeout: 10 * time.Second, + IdleTimeout: 120 * time.Second, + Handler: mux, + } + + // Remove surrounding "[]" from the host definition to allow users to define the host as "[::1]" or "::1". + host := *prometheusConfig.Host + if len(host) > 2 && host[0] == '[' && host[len(host)-1] == ']' { + host = host[1 : len(host)-1] + } + + addr := net.JoinHostPort(host, strconv.Itoa(*prometheusConfig.Port)) + lis, err := net.Listen("tcp", addr) + if err != nil { + return nil, errors.Join( + fmt.Errorf("binding address %s for Prometheus exporter: %w", addr, err), + reader.Shutdown(ctx), + ) + } + + // Only for testing reasons, add the address to the http Server, will not be used. 
+ server.Addr = lis.Addr().String() + + go func() { + if err := server.Serve(lis); err != nil && !errors.Is(err, http.ErrServerClosed) { + otel.Handle(fmt.Errorf("the Prometheus HTTP server exited unexpectedly: %w", err)) + } + }() + + return readerWithServer{reader, &server}, nil +} + +func prometheusReaderOpts(prometheusConfig *ExperimentalPrometheusMetricExporter) ([]otelprom.Option, error) { + var opts []otelprom.Option + if prometheusConfig.WithoutScopeInfo != nil && *prometheusConfig.WithoutScopeInfo { + opts = append(opts, otelprom.WithoutScopeInfo()) + } + if prometheusConfig.WithoutTypeSuffix != nil && *prometheusConfig.WithoutTypeSuffix { + opts = append(opts, otelprom.WithoutCounterSuffixes()) + } + if prometheusConfig.WithoutUnits != nil && *prometheusConfig.WithoutUnits { + opts = append(opts, otelprom.WithoutUnits()) + } + if prometheusConfig.WithResourceConstantLabels != nil { + f, err := newIncludeExcludeFilter(prometheusConfig.WithResourceConstantLabels) + if err != nil { + return nil, err + } + opts = append(opts, otelprom.WithResourceAsConstantLabels(f)) + } + + return opts, nil +} + +type readerWithServer struct { + sdkmetric.Reader + server *http.Server +} + +func (rws readerWithServer) Shutdown(ctx context.Context) error { + return errors.Join( + rws.Reader.Shutdown(ctx), + rws.server.Shutdown(ctx), + ) +} + +func view(v View) (sdkmetric.View, error) { + if v.Selector == nil { + return nil, errors.New("view: no selector provided") + } + + inst, err := instrument(*v.Selector) + if err != nil { + return nil, err + } + + s, err := stream(v.Stream) + if err != nil { + return nil, err + } + return sdkmetric.NewView(inst, s), nil +} + +func instrument(vs ViewSelector) (sdkmetric.Instrument, error) { + kind, err := instrumentKind(vs.InstrumentType) + if err != nil { + return sdkmetric.Instrument{}, fmt.Errorf("view_selector: %w", err) + } + inst := sdkmetric.Instrument{ + Name: strOrEmpty(vs.InstrumentName), + Unit: strOrEmpty(vs.Unit), + Kind: kind, + Scope: instrumentation.Scope{ + Name: strOrEmpty(vs.MeterName), + Version: strOrEmpty(vs.MeterVersion), + SchemaURL: strOrEmpty(vs.MeterSchemaUrl), + }, + } + + if instrumentIsEmpty(inst) { + return sdkmetric.Instrument{}, errors.New("view_selector: empty selector not supporter") + } + return inst, nil +} + +func stream(vs *ViewStream) (sdkmetric.Stream, error) { + if vs == nil { + return sdkmetric.Stream{}, nil + } + + f, err := newIncludeExcludeFilter(vs.AttributeKeys) + if err != nil { + return sdkmetric.Stream{}, err + } + return sdkmetric.Stream{ + Name: strOrEmpty(vs.Name), + Description: strOrEmpty(vs.Description), + Aggregation: aggregation(vs.Aggregation), + AttributeFilter: f, + }, nil +} + +func aggregation(aggr *Aggregation) sdkmetric.Aggregation { + if aggr == nil { + return nil + } + + if aggr.Base2ExponentialBucketHistogram != nil { + return sdkmetric.AggregationBase2ExponentialHistogram{ + MaxSize: int32OrZero(aggr.Base2ExponentialBucketHistogram.MaxSize), + MaxScale: int32OrZero(aggr.Base2ExponentialBucketHistogram.MaxScale), + // Need to negate because config has the positive action RecordMinMax. + NoMinMax: !boolOrFalse(aggr.Base2ExponentialBucketHistogram.RecordMinMax), + } + } + if aggr.Default != nil { + // TODO: Understand what to set here. 
+ return nil + } + if aggr.Drop != nil { + return sdkmetric.AggregationDrop{} + } + if aggr.ExplicitBucketHistogram != nil { + return sdkmetric.AggregationExplicitBucketHistogram{ + Boundaries: aggr.ExplicitBucketHistogram.Boundaries, + // Need to negate because config has the positive action RecordMinMax. + NoMinMax: !boolOrFalse(aggr.ExplicitBucketHistogram.RecordMinMax), + } + } + if aggr.LastValue != nil { + return sdkmetric.AggregationLastValue{} + } + if aggr.Sum != nil { + return sdkmetric.AggregationSum{} + } + return nil +} + +func instrumentKind(vsit *InstrumentType) (sdkmetric.InstrumentKind, error) { + if vsit == nil { + // Equivalent to instrumentKindUndefined. + return instrumentKindUndefined, nil + } + + switch *vsit { + case InstrumentTypeCounter: + return sdkmetric.InstrumentKindCounter, nil + case InstrumentTypeUpDownCounter: + return sdkmetric.InstrumentKindUpDownCounter, nil + case InstrumentTypeHistogram: + return sdkmetric.InstrumentKindHistogram, nil + case InstrumentTypeObservableCounter: + return sdkmetric.InstrumentKindObservableCounter, nil + case InstrumentTypeObservableUpDownCounter: + return sdkmetric.InstrumentKindObservableUpDownCounter, nil + case InstrumentTypeObservableGauge: + return sdkmetric.InstrumentKindObservableGauge, nil + } + + return instrumentKindUndefined, errors.New("instrument_type: invalid value") +} + +func instrumentIsEmpty(i sdkmetric.Instrument) bool { + return i.Name == "" && + i.Description == "" && + i.Kind == instrumentKindUndefined && + i.Unit == "" && + i.Scope == zeroScope +} + +func boolOrFalse(pBool *bool) bool { + if pBool == nil { + return false + } + return *pBool +} + +func int32OrZero(pInt *int) int32 { + if pInt == nil { + return 0 + } + i := *pInt + if i > math.MaxInt32 { + return math.MaxInt32 + } + if i < math.MinInt32 { + return math.MinInt32 + } + return int32(i) // nolint: gosec // Overflow and underflow checked above. 
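`int32OrZero` deliberately saturates at the int32 bounds instead of letting the conversion wrap, and like `boolOrFalse`/`strOrEmpty` it treats a nil pointer as the zero value. A quick demonstration (assumes a 64-bit `int`, since the literal below would not fit a 32-bit one):

```go
package main

import (
	"fmt"
	"math"
)

// Copied from metric.go above so the demo is self-contained.
func int32OrZero(pInt *int) int32 {
	if pInt == nil {
		return 0
	}
	i := *pInt
	if i > math.MaxInt32 {
		return math.MaxInt32
	}
	if i < math.MinInt32 {
		return math.MinInt32
	}
	return int32(i)
}

func main() {
	big := math.MaxInt32 + 1       // 2147483648; a plain int32 conversion would wrap negative
	fmt.Println(int32OrZero(nil))  // 0
	fmt.Println(int32OrZero(&big)) // 2147483647, clamped
}
```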
+} + +func strOrEmpty(pStr *string) string { + if pStr == nil { + return "" + } + return *pStr +} diff --git a/otelconf/v0.4.0/metric_test.go b/otelconf/v0.4.0/metric_test.go new file mode 100644 index 00000000000..c5dd96ca26e --- /dev/null +++ b/otelconf/v0.4.0/metric_test.go @@ -0,0 +1,1346 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelconf + +import ( + "context" + "errors" + "fmt" + "net/http" + "path/filepath" + "reflect" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" + otelprom "go.opentelemetry.io/otel/exporters/prometheus" + "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/sdk/instrumentation" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/resource" +) + +func TestMeterProvider(t *testing.T) { + tests := []struct { + name string + cfg configOptions + wantProvider metric.MeterProvider + wantErr error + }{ + { + name: "no-meter-provider-configured", + wantProvider: noop.NewMeterProvider(), + }, + { + name: "error-in-config", + cfg: configOptions{ + opentelemetryConfig: OpenTelemetryConfiguration{ + MeterProvider: &MeterProviderJson{ + Readers: []MetricReader{ + { + Periodic: &PeriodicMetricReader{}, + Pull: &PullMetricReader{}, + }, + }, + }, + }, + }, + wantProvider: noop.NewMeterProvider(), + wantErr: errors.Join(errors.New("must not specify multiple metric reader type")), + }, + { + name: "multiple-errors-in-config", + cfg: configOptions{ + opentelemetryConfig: OpenTelemetryConfiguration{ + MeterProvider: &MeterProviderJson{ + Readers: []MetricReader{ + { + Periodic: &PeriodicMetricReader{}, + Pull: &PullMetricReader{}, + }, + { + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + Console: ConsoleExporter{}, + OTLPGrpc: &OTLPGrpcMetricExporter{}, + }, + }, + }, + }, + }, + }, + }, + wantProvider: noop.NewMeterProvider(), + wantErr: errors.Join(errors.New("must not specify multiple metric reader type"), errors.New("must not specify multiple exporters")), + }, + } + for _, tt := range tests { + mp, shutdown, err := meterProvider(tt.cfg, resource.Default()) + require.Equal(t, tt.wantProvider, mp) + assert.Equal(t, tt.wantErr, err) + require.NoError(t, shutdown(context.Background())) + } +} + +func TestReader(t *testing.T) { + consoleExporter, err := stdoutmetric.New( + stdoutmetric.WithPrettyPrint(), + ) + require.NoError(t, err) + ctx := context.Background() + otlpGRPCExporter, err := otlpmetricgrpc.New(ctx) + require.NoError(t, err) + otlpHTTPExporter, err := otlpmetrichttp.New(ctx) + require.NoError(t, err) + promExporter, err := otelprom.New() + require.NoError(t, err) + testCases := []struct { + name string + reader MetricReader + args any + wantErr string + wantReader sdkmetric.Reader + }{ + { + name: "no reader", + wantErr: "no valid metric reader", + }, + { + name: "pull/no-exporter", + reader: MetricReader{ + Pull: &PullMetricReader{}, + }, + wantErr: "no valid metric exporter", + }, + { + name: "pull/prometheus-no-host", + reader: MetricReader{ + Pull: &PullMetricReader{ + Exporter: PullMetricExporter{ + PrometheusDevelopment: &ExperimentalPrometheusMetricExporter{}, + }, + }, + }, + wantErr: "host must be specified", 
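The test tables lean heavily on a `ptr` helper to take addresses of literals for the many pointer-typed config fields (`ptr(0)`, `ptr("gzip")`, ...). Its definition is not part of this diff; presumably it lives in a shared test file as a one-line generic along these lines:

```go
package main

import "fmt"

// Assumed shape of the helper used throughout these tests.
func ptr[T any](v T) *T { return &v }

func main() {
	timeout := ptr(1000) // *int without a named temporary
	comp := ptr("gzip")  // *string
	fmt.Println(*timeout, *comp)
}
```

Without such a helper, every `*int` or `*string` field in the table would need a named variable before its address could be taken.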
+ }, + { + name: "pull/prometheus-no-port", + reader: MetricReader{ + Pull: &PullMetricReader{ + Exporter: PullMetricExporter{ + PrometheusDevelopment: &ExperimentalPrometheusMetricExporter{ + Host: ptr("localhost"), + }, + }, + }, + }, + wantErr: "port must be specified", + }, + { + name: "pull/prometheus", + reader: MetricReader{ + Pull: &PullMetricReader{ + Exporter: PullMetricExporter{ + PrometheusDevelopment: &ExperimentalPrometheusMetricExporter{ + Host: ptr("localhost"), + Port: ptr(0), + WithoutScopeInfo: ptr(true), + WithoutUnits: ptr(true), + WithoutTypeSuffix: ptr(true), + WithResourceConstantLabels: &IncludeExclude{ + Included: []string{"include"}, + Excluded: []string{"exclude"}, + }, + }, + }, + }, + }, + wantReader: readerWithServer{promExporter, nil}, + }, + { + name: "periodic/otlp-grpc-exporter", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("http://localhost:4318"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: "periodic/otlp-grpc-exporter-with-path", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("http://localhost:4318/path/123"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: "periodic/otlp-grpc-good-ca-certificate", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("https://localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + CertificateFile: ptr(filepath.Join("..", "testdata", "ca.crt")), + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: "periodic/otlp-grpc-bad-ca-certificate", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("https://localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + CertificateFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + }, + }, + }, + }, + wantErr: "could not create certificate authority chain from certificate", + }, + { + name: "periodic/otlp-grpc-bad-client-certificate", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + ClientCertificateFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + ClientKeyFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + }, + }, + }, + }, + wantErr: "could not use client certificate: tls: failed to find any PEM data in certificate input", + }, + { + name: "periodic/otlp-grpc-bad-headerslist", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + HeadersList: ptr("==="), + }, + }, + }, + }, + wantErr: "invalid headers list: invalid key: \"\"", + }, + { + name: "periodic/otlp-grpc-exporter-no-endpoint", + reader: MetricReader{ + Periodic: 
&PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: "periodic/otlp-grpc-exporter-socket-endpoint", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("unix:collector.sock"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: "periodic/otlp-grpc-exporter-no-scheme", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: "periodic/otlp-grpc-invalid-endpoint", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr(" "), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantErr: "parse \" \": invalid URI for request", + }, + { + name: "periodic/otlp-grpc-none-compression", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: "periodic/otlp-grpc-delta-temporality", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + TemporalityPreference: ptr(ExporterTemporalityPreferenceDelta), + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: "periodic/otlp-grpc-cumulative-temporality", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + TemporalityPreference: ptr(ExporterTemporalityPreferenceCumulative), + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: "periodic/otlp-grpc-lowmemory-temporality", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + TemporalityPreference: ptr(ExporterTemporalityPreferenceLowMemory), + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: 
"periodic/otlp-grpc-invalid-temporality", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + TemporalityPreference: (*ExporterTemporalityPreference)(ptr("invalid")), + }, + }, + }, + }, + wantErr: "unsupported temporality preference \"invalid\"", + }, + { + name: "periodic/otlp-grpc-invalid-compression", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("invalid"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantErr: "unsupported compression \"invalid\"", + }, + { + name: "periodic/otlp-http-exporter", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr("http://localhost:4318"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), + }, + { + name: "periodic/otlp-http-good-ca-certificate", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr("https://localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + CertificateFile: ptr(filepath.Join("..", "testdata", "ca.crt")), + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), + }, + { + name: "periodic/otlp-http-bad-ca-certificate", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr("https://localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + CertificateFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + }, + }, + }, + }, + wantErr: "could not create certificate authority chain from certificate", + }, + { + name: "periodic/otlp-http-bad-client-certificate", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + ClientCertificateFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + ClientKeyFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + }, + }, + }, + }, + wantErr: "could not use client certificate: tls: failed to find any PEM data in certificate input", + }, + { + name: "periodic/otlp-http-bad-headerslist", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + HeadersList: ptr("==="), + }, + }, + }, + }, + wantErr: "invalid headers list: invalid key: \"\"", + }, + { + name: "periodic/otlp-http-exporter-with-path", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr("http://localhost:4318/path/123"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantReader: 
sdkmetric.NewPeriodicReader(otlpHTTPExporter), + }, + { + name: "periodic/otlp-http-exporter-no-endpoint", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), + }, + { + name: "periodic/otlp-http-exporter-no-scheme", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), + }, + { + name: "periodic/otlp-http-invalid-endpoint", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr(" "), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantErr: "parse \" \": invalid URI for request", + }, + { + name: "periodic/otlp-http-none-compression", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), + }, + { + name: "periodic/otlp-http-cumulative-temporality", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + TemporalityPreference: ptr(ExporterTemporalityPreferenceCumulative), + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), + }, + { + name: "periodic/otlp-http-lowmemory-temporality", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + TemporalityPreference: ptr(ExporterTemporalityPreferenceLowMemory), + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), + }, + { + name: "periodic/otlp-http-delta-temporality", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + TemporalityPreference: ptr(ExporterTemporalityPreferenceDelta), + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), + }, + { + name: "periodic/otlp-http-invalid-temporality", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, 
+ }, + TemporalityPreference: (*ExporterTemporalityPreference)(ptr("invalid")), + }, + }, + }, + }, + wantErr: "unsupported temporality preference \"invalid\"", + }, + { + name: "periodic/otlp-http-invalid-compression", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("invalid"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantErr: "unsupported compression \"invalid\"", + }, + { + name: "periodic/no-exporter", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{}, + }, + }, + wantErr: "no valid metric exporter", + }, + { + name: "periodic/console-exporter", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + Console: ConsoleExporter{}, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(consoleExporter), + }, + { + name: "periodic/console-exporter-with-extra-options", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Interval: ptr(30_000), + Timeout: ptr(5_000), + Exporter: PushMetricExporter{ + Console: ConsoleExporter{}, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader( + consoleExporter, + sdkmetric.WithInterval(30_000*time.Millisecond), + sdkmetric.WithTimeout(5_000*time.Millisecond), + ), + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + got, err := metricReader(context.Background(), tt.reader) + if tt.wantErr != "" { + require.Error(t, err) + require.Equal(t, tt.wantErr, err.Error()) + } else { + require.NoError(t, err) + } + if tt.wantReader == nil { + require.Nil(t, got) + } else { + require.Equal(t, reflect.TypeOf(tt.wantReader), reflect.TypeOf(got)) + var fieldName string + switch reflect.TypeOf(tt.wantReader).String() { + case "*metric.PeriodicReader": + fieldName = "exporter" + case "otelconf.readerWithServer": + fieldName = "Reader" + default: + fieldName = "e" + } + wantExporterType := reflect.Indirect(reflect.ValueOf(tt.wantReader)).FieldByName(fieldName).Elem().Type() + gotExporterType := reflect.Indirect(reflect.ValueOf(got)).FieldByName(fieldName).Elem().Type() + require.Equal(t, wantExporterType.String(), gotExporterType.String()) + require.NoError(t, got.Shutdown(context.Background())) + } + }) + } +} + +func TestView(t *testing.T) { + testCases := []struct { + name string + view View + args any + wantErr string + matchInstrument *sdkmetric.Instrument + wantStream sdkmetric.Stream + wantResult bool + }{ + { + name: "no selector", + wantErr: "view: no selector provided", + }, + { + name: "selector/invalid_type", + view: View{ + Selector: &ViewSelector{ + InstrumentType: (*InstrumentType)(ptr("invalid_type")), + }, + }, + wantErr: "view_selector: instrument_type: invalid value", + }, + { + name: "selector/invalid_type", + view: View{ + Selector: &ViewSelector{}, + }, + wantErr: "view_selector: empty selector not supporter", + }, + { + name: "all selectors match", + view: View{ + Selector: &ViewSelector{ + InstrumentName: ptr("test_name"), + InstrumentType: ptr(InstrumentTypeCounter), + Unit: ptr("test_unit"), + MeterName: ptr("test_meter_name"), + MeterVersion: ptr("test_meter_version"), + MeterSchemaUrl: ptr("test_schema_url"), + }, + }, + matchInstrument: &sdkmetric.Instrument{ + Name: "test_name", + Unit: "test_unit", + Kind: sdkmetric.InstrumentKindCounter, + Scope: instrumentation.Scope{ + Name: "test_meter_name", + Version: 
"test_meter_version", + SchemaURL: "test_schema_url", + }, + }, + wantStream: sdkmetric.Stream{Name: "test_name", Unit: "test_unit"}, + wantResult: true, + }, + { + name: "all selectors no match name", + view: View{ + Selector: &ViewSelector{ + InstrumentName: ptr("test_name"), + InstrumentType: ptr(InstrumentTypeCounter), + Unit: ptr("test_unit"), + MeterName: ptr("test_meter_name"), + MeterVersion: ptr("test_meter_version"), + MeterSchemaUrl: ptr("test_schema_url"), + }, + }, + matchInstrument: &sdkmetric.Instrument{ + Name: "not_match", + Unit: "test_unit", + Kind: sdkmetric.InstrumentKindCounter, + Scope: instrumentation.Scope{ + Name: "test_meter_name", + Version: "test_meter_version", + SchemaURL: "test_schema_url", + }, + }, + wantStream: sdkmetric.Stream{}, + wantResult: false, + }, + { + name: "all selectors no match unit", + view: View{ + Selector: &ViewSelector{ + InstrumentName: ptr("test_name"), + InstrumentType: ptr(InstrumentTypeCounter), + Unit: ptr("test_unit"), + MeterName: ptr("test_meter_name"), + MeterVersion: ptr("test_meter_version"), + MeterSchemaUrl: ptr("test_schema_url"), + }, + }, + matchInstrument: &sdkmetric.Instrument{ + Name: "test_name", + Unit: "not_match", + Kind: sdkmetric.InstrumentKindCounter, + Scope: instrumentation.Scope{ + Name: "test_meter_name", + Version: "test_meter_version", + SchemaURL: "test_schema_url", + }, + }, + wantStream: sdkmetric.Stream{}, + wantResult: false, + }, + { + name: "all selectors no match kind", + view: View{ + Selector: &ViewSelector{ + InstrumentName: ptr("test_name"), + InstrumentType: ptr(InstrumentTypeHistogram), + Unit: ptr("test_unit"), + MeterName: ptr("test_meter_name"), + MeterVersion: ptr("test_meter_version"), + MeterSchemaUrl: ptr("test_schema_url"), + }, + }, + matchInstrument: &sdkmetric.Instrument{ + Name: "test_name", + Unit: "test_unit", + Kind: sdkmetric.InstrumentKindCounter, + Scope: instrumentation.Scope{ + Name: "test_meter_name", + Version: "test_meter_version", + SchemaURL: "test_schema_url", + }, + }, + wantStream: sdkmetric.Stream{}, + wantResult: false, + }, + { + name: "all selectors no match meter name", + view: View{ + Selector: &ViewSelector{ + InstrumentName: ptr("test_name"), + InstrumentType: ptr(InstrumentTypeCounter), + Unit: ptr("test_unit"), + MeterName: ptr("test_meter_name"), + MeterVersion: ptr("test_meter_version"), + MeterSchemaUrl: ptr("test_schema_url"), + }, + }, + matchInstrument: &sdkmetric.Instrument{ + Name: "test_name", + Unit: "test_unit", + Kind: sdkmetric.InstrumentKindCounter, + Scope: instrumentation.Scope{ + Name: "not_match", + Version: "test_meter_version", + SchemaURL: "test_schema_url", + }, + }, + wantStream: sdkmetric.Stream{}, + wantResult: false, + }, + { + name: "all selectors no match meter version", + view: View{ + Selector: &ViewSelector{ + InstrumentName: ptr("test_name"), + InstrumentType: ptr(InstrumentTypeCounter), + Unit: ptr("test_unit"), + MeterName: ptr("test_meter_name"), + MeterVersion: ptr("test_meter_version"), + MeterSchemaUrl: ptr("test_schema_url"), + }, + }, + matchInstrument: &sdkmetric.Instrument{ + Name: "test_name", + Unit: "test_unit", + Kind: sdkmetric.InstrumentKindCounter, + Scope: instrumentation.Scope{ + Name: "test_meter_name", + Version: "not_match", + SchemaURL: "test_schema_url", + }, + }, + wantStream: sdkmetric.Stream{}, + wantResult: false, + }, + { + name: "all selectors no match meter schema url", + view: View{ + Selector: &ViewSelector{ + InstrumentName: ptr("test_name"), + InstrumentType: ptr(InstrumentTypeCounter), 
+ Unit: ptr("test_unit"), + MeterName: ptr("test_meter_name"), + MeterVersion: ptr("test_meter_version"), + MeterSchemaUrl: ptr("test_schema_url"), + }, + }, + matchInstrument: &sdkmetric.Instrument{ + Name: "test_name", + Unit: "test_unit", + Kind: sdkmetric.InstrumentKindCounter, + Scope: instrumentation.Scope{ + Name: "test_meter_name", + Version: "test_meter_version", + SchemaURL: "not_match", + }, + }, + wantStream: sdkmetric.Stream{}, + wantResult: false, + }, + { + name: "with stream", + view: View{ + Selector: &ViewSelector{ + InstrumentName: ptr("test_name"), + Unit: ptr("test_unit"), + }, + Stream: &ViewStream{ + Name: ptr("new_name"), + Description: ptr("new_description"), + AttributeKeys: ptr(IncludeExclude{Included: []string{"foo", "bar"}}), + Aggregation: &Aggregation{Sum: make(SumAggregation)}, + }, + }, + matchInstrument: &sdkmetric.Instrument{ + Name: "test_name", + Description: "test_description", + Unit: "test_unit", + }, + wantStream: sdkmetric.Stream{ + Name: "new_name", + Description: "new_description", + Unit: "test_unit", + Aggregation: sdkmetric.AggregationSum{}, + }, + wantResult: true, + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + got, err := view(tt.view) + if tt.wantErr != "" { + require.EqualError(t, err, tt.wantErr) + require.Nil(t, got) + } else { + require.NoError(t, err) + gotStream, gotResult := got(*tt.matchInstrument) + // Remove filter, since it cannot be compared + gotStream.AttributeFilter = nil + require.Equal(t, tt.wantStream, gotStream) + require.Equal(t, tt.wantResult, gotResult) + } + }) + } +} + +func TestInstrumentType(t *testing.T) { + testCases := []struct { + name string + instType *InstrumentType + wantErr error + wantKind sdkmetric.InstrumentKind + }{ + { + name: "nil", + wantKind: sdkmetric.InstrumentKind(0), + }, + { + name: "counter", + instType: ptr(InstrumentTypeCounter), + wantKind: sdkmetric.InstrumentKindCounter, + }, + { + name: "up_down_counter", + instType: ptr(InstrumentTypeUpDownCounter), + wantKind: sdkmetric.InstrumentKindUpDownCounter, + }, + { + name: "histogram", + instType: ptr(InstrumentTypeHistogram), + wantKind: sdkmetric.InstrumentKindHistogram, + }, + { + name: "observable_counter", + instType: ptr(InstrumentTypeObservableCounter), + wantKind: sdkmetric.InstrumentKindObservableCounter, + }, + { + name: "observable_up_down_counter", + instType: ptr(InstrumentTypeObservableUpDownCounter), + wantKind: sdkmetric.InstrumentKindObservableUpDownCounter, + }, + { + name: "observable_gauge", + instType: ptr(InstrumentTypeObservableGauge), + wantKind: sdkmetric.InstrumentKindObservableGauge, + }, + { + name: "invalid", + instType: (*InstrumentType)(ptr("invalid")), + wantErr: errors.New("instrument_type: invalid value"), + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + got, err := instrumentKind(tt.instType) + if tt.wantErr != nil { + require.Equal(t, tt.wantErr, err) + require.Zero(t, got) + } else { + require.NoError(t, err) + require.Equal(t, tt.wantKind, got) + } + }) + } +} + +func TestAggregation(t *testing.T) { + testCases := []struct { + name string + aggregation *Aggregation + wantAggregation sdkmetric.Aggregation + }{ + { + name: "nil", + wantAggregation: nil, + }, + { + name: "empty", + aggregation: &Aggregation{}, + wantAggregation: nil, + }, + { + name: "Base2ExponentialBucketHistogram empty", + aggregation: &Aggregation{ + Base2ExponentialBucketHistogram: &Base2ExponentialBucketHistogramAggregation{}, + }, + wantAggregation: 
sdkmetric.AggregationBase2ExponentialHistogram{ + MaxSize: 0, + MaxScale: 0, + NoMinMax: true, + }, + }, + { + name: "Base2ExponentialBucketHistogram", + aggregation: &Aggregation{ + Base2ExponentialBucketHistogram: &Base2ExponentialBucketHistogramAggregation{ + MaxSize: ptr(2), + MaxScale: ptr(3), + RecordMinMax: ptr(true), + }, + }, + wantAggregation: sdkmetric.AggregationBase2ExponentialHistogram{ + MaxSize: 2, + MaxScale: 3, + NoMinMax: false, + }, + }, + { + name: "Default", + aggregation: &Aggregation{ + Default: make(DefaultAggregation), + }, + wantAggregation: nil, + }, + { + name: "Drop", + aggregation: &Aggregation{ + Drop: make(DropAggregation), + }, + wantAggregation: sdkmetric.AggregationDrop{}, + }, + { + name: "ExplicitBucketHistogram empty", + aggregation: &Aggregation{ + ExplicitBucketHistogram: &ExplicitBucketHistogramAggregation{}, + }, + wantAggregation: sdkmetric.AggregationExplicitBucketHistogram{ + Boundaries: nil, + NoMinMax: true, + }, + }, + { + name: "ExplicitBucketHistogram", + aggregation: &Aggregation{ + ExplicitBucketHistogram: &ExplicitBucketHistogramAggregation{ + Boundaries: []float64{1, 2, 3}, + RecordMinMax: ptr(true), + }, + }, + wantAggregation: sdkmetric.AggregationExplicitBucketHistogram{ + Boundaries: []float64{1, 2, 3}, + NoMinMax: false, + }, + }, + { + name: "LastValue", + aggregation: &Aggregation{ + LastValue: make(LastValueAggregation), + }, + wantAggregation: sdkmetric.AggregationLastValue{}, + }, + { + name: "Sum", + aggregation: &Aggregation{ + Sum: make(SumAggregation), + }, + wantAggregation: sdkmetric.AggregationSum{}, + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + got := aggregation(tt.aggregation) + require.Equal(t, tt.wantAggregation, got) + }) + } +} + +func TestNewIncludeExcludeFilter(t *testing.T) { + testCases := []struct { + name string + attributeKeys *IncludeExclude + wantPass []string + wantFail []string + }{ + { + name: "empty", + attributeKeys: nil, + wantPass: []string{"foo", "bar"}, + wantFail: nil, + }, + { + name: "filter-with-include", + attributeKeys: ptr(IncludeExclude{ + Included: []string{"foo"}, + }), + wantPass: []string{"foo"}, + wantFail: []string{"bar"}, + }, + { + name: "filter-with-exclude", + attributeKeys: ptr(IncludeExclude{ + Excluded: []string{"foo"}, + }), + wantPass: []string{"bar"}, + wantFail: []string{"foo"}, + }, + { + name: "filter-with-include-and-exclude", + attributeKeys: ptr(IncludeExclude{ + Included: []string{"bar"}, + Excluded: []string{"foo"}, + }), + wantPass: []string{"bar"}, + wantFail: []string{"foo"}, + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + got, err := newIncludeExcludeFilter(tt.attributeKeys) + require.NoError(t, err) + for _, pass := range tt.wantPass { + require.True(t, got(attribute.KeyValue{Key: attribute.Key(pass), Value: attribute.StringValue("")})) + } + for _, fail := range tt.wantFail { + require.False(t, got(attribute.KeyValue{Key: attribute.Key(fail), Value: attribute.StringValue("")})) + } + }) + } +} + +func TestNewIncludeExcludeFilterError(t *testing.T) { + _, err := newIncludeExcludeFilter(ptr(IncludeExclude{ + Included: []string{"foo"}, + Excluded: []string{"foo"}, + })) + require.Equal(t, fmt.Errorf("attribute cannot be in both include and exclude list: foo"), err) +} + +func TestPrometheusReaderOpts(t *testing.T) { + testCases := []struct { + name string + cfg ExperimentalPrometheusMetricExporter + wantOptions int + }{ + { + name: "no options", + cfg: ExperimentalPrometheusMetricExporter{}, 
+ wantOptions: 0, + }, + { + name: "all set", + cfg: ExperimentalPrometheusMetricExporter{ + WithoutScopeInfo: ptr(true), + WithoutTypeSuffix: ptr(true), + WithoutUnits: ptr(true), + WithResourceConstantLabels: &IncludeExclude{}, + }, + wantOptions: 4, + }, + { + name: "all set false", + cfg: ExperimentalPrometheusMetricExporter{ + WithoutScopeInfo: ptr(false), + WithoutTypeSuffix: ptr(false), + WithoutUnits: ptr(false), + WithResourceConstantLabels: &IncludeExclude{}, + }, + wantOptions: 1, + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + opts, err := prometheusReaderOpts(&tt.cfg) + require.NoError(t, err) + require.Len(t, opts, tt.wantOptions) + }) + } +} + +func TestPrometheusIPv6(t *testing.T) { + tests := []struct { + name string + host string + }{ + { + name: "IPv6", + host: "::1", + }, + { + name: "[IPv6]", + host: "[::1]", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + port := 0 + cfg := ExperimentalPrometheusMetricExporter{ + Host: &tt.host, + Port: &port, + WithoutScopeInfo: ptr(true), + WithoutTypeSuffix: ptr(true), + WithoutUnits: ptr(true), + WithResourceConstantLabels: &IncludeExclude{}, + } + + rs, err := prometheusReader(context.Background(), &cfg) + t.Cleanup(func() { + require.NoError(t, rs.Shutdown(context.Background())) + }) + require.NoError(t, err) + + hServ := rs.(readerWithServer).server + assert.True(t, strings.HasPrefix(hServ.Addr, "[::1]:")) + + resp, err := http.DefaultClient.Get("http://" + hServ.Addr + "/metrics") + t.Cleanup(func() { + require.NoError(t, resp.Body.Close()) + }) + require.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode) + }) + } +} diff --git a/otelconf/v0.4.0/resource.go b/otelconf/v0.4.0/resource.go new file mode 100644 index 00000000000..b17f35cea7d --- /dev/null +++ b/otelconf/v0.4.0/resource.go @@ -0,0 +1,63 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelconf // import "go.opentelemetry.io/contrib/otelconf/v0.3.0" + +import ( + "fmt" + "strconv" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/resource" +) + +func keyVal(k string, v any) attribute.KeyValue { + switch val := v.(type) { + case bool: + return attribute.Bool(k, val) + case int64: + return attribute.Int64(k, val) + case uint64: + return attribute.String(k, strconv.FormatUint(val, 10)) + case float64: + return attribute.Float64(k, val) + case int8: + return attribute.Int64(k, int64(val)) + case uint8: + return attribute.Int64(k, int64(val)) + case int16: + return attribute.Int64(k, int64(val)) + case uint16: + return attribute.Int64(k, int64(val)) + case int32: + return attribute.Int64(k, int64(val)) + case uint32: + return attribute.Int64(k, int64(val)) + case float32: + return attribute.Float64(k, float64(val)) + case int: + return attribute.Int(k, val) + case uint: + return attribute.String(k, strconv.FormatUint(uint64(val), 10)) + case string: + return attribute.String(k, val) + default: + return attribute.String(k, fmt.Sprint(v)) + } +} + +func newResource(res *ResourceJson) *resource.Resource { + if res == nil { + return resource.Default() + } + + var attrs []attribute.KeyValue + for _, v := range res.Attributes { + attrs = append(attrs, keyVal(v.Name, v.Value)) + } + + if res.SchemaUrl == nil { + return resource.NewSchemaless(attrs...) + } + return resource.NewWithAttributes(*res.SchemaUrl, attrs...) 
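`keyVal` normalizes the loosely typed attribute values from the parsed config onto the OTel attribute model: bools, signed ints, and floats map directly, small unsigned ints widen to Int64, while `uint`/`uint64` are rendered as strings because they can exceed the int64 range; anything else falls through to `fmt.Sprint`. The resulting key/values expressed with the public API (values are illustrative; the test file below exercises the same mappings):

```go
package main

import (
	"fmt"
	"strconv"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	attrs := []attribute.KeyValue{
		attribute.Bool("attr-bool", true),                              // bool -> Bool
		attribute.Int64("attr-uint8", int64(uint8(18))),                // small uints widen to Int64
		attribute.String("attr-uint64", strconv.FormatUint(1<<63, 10)), // may exceed int64 -> String
		attribute.String("attr-default", fmt.Sprint(struct{}{})),       // fallback: fmt.Sprint
	}
	for _, kv := range attrs {
		fmt.Printf("%s = %s\n", kv.Key, kv.Value.Emit())
	}
}
```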
+} diff --git a/otelconf/v0.4.0/resource_test.go b/otelconf/v0.4.0/resource_test.go new file mode 100644 index 00000000000..79b9379d932 --- /dev/null +++ b/otelconf/v0.4.0/resource_test.go @@ -0,0 +1,113 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelconf + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/resource" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type mockType struct{} + +func TestNewResource(t *testing.T) { + other := mockType{} + tests := []struct { + name string + config *ResourceJson + wantResource *resource.Resource + }{ + { + name: "no-resource-configuration", + wantResource: resource.Default(), + }, + { + name: "resource-no-attributes", + config: &ResourceJson{}, + wantResource: resource.NewSchemaless(), + }, + { + name: "resource-with-schema", + config: &ResourceJson{ + SchemaUrl: ptr(semconv.SchemaURL), + }, + wantResource: resource.NewWithAttributes(semconv.SchemaURL), + }, + { + name: "resource-with-attributes", + config: &ResourceJson{ + Attributes: []AttributeNameValue{ + {Name: "service.name", Value: "service-a"}, + }, + }, + wantResource: resource.NewWithAttributes("", + semconv.ServiceName("service-a"), + ), + }, + { + name: "resource-with-attributes-and-schema", + config: &ResourceJson{ + Attributes: []AttributeNameValue{ + {Name: "service.name", Value: "service-a"}, + }, + SchemaUrl: ptr(semconv.SchemaURL), + }, + wantResource: resource.NewWithAttributes(semconv.SchemaURL, + semconv.ServiceName("service-a"), + ), + }, + { + name: "resource-with-additional-attributes-and-schema", + config: &ResourceJson{ + Attributes: []AttributeNameValue{ + {Name: "service.name", Value: "service-a"}, + {Name: "attr-bool", Value: true}, + {Name: "attr-int64", Value: int64(-164)}, + {Name: "attr-uint64", Value: uint64(164)}, + {Name: "attr-float64", Value: float64(64.0)}, + {Name: "attr-int8", Value: int8(-18)}, + {Name: "attr-uint8", Value: uint8(18)}, + {Name: "attr-int16", Value: int16(-116)}, + {Name: "attr-uint16", Value: uint16(116)}, + {Name: "attr-int32", Value: int32(-132)}, + {Name: "attr-uint32", Value: uint32(132)}, + {Name: "attr-float32", Value: float32(32.0)}, + {Name: "attr-int", Value: int(-1)}, + {Name: "attr-uint", Value: uint(1)}, + {Name: "attr-string", Value: "string-val"}, + {Name: "attr-default", Value: other}, + }, + SchemaUrl: ptr(semconv.SchemaURL), + }, + wantResource: resource.NewWithAttributes(semconv.SchemaURL, + semconv.ServiceName("service-a"), + attribute.Bool("attr-bool", true), + attribute.String("attr-uint64", fmt.Sprintf("%d", 164)), + attribute.Int64("attr-int64", int64(-164)), + attribute.Float64("attr-float64", float64(64.0)), + attribute.Int64("attr-int8", int64(-18)), + attribute.Int64("attr-uint8", int64(18)), + attribute.Int64("attr-int16", int64(-116)), + attribute.Int64("attr-uint16", int64(116)), + attribute.Int64("attr-int32", int64(-132)), + attribute.Int64("attr-uint32", int64(132)), + attribute.Float64("attr-float32", float64(32.0)), + attribute.Int64("attr-int", int64(-1)), + attribute.String("attr-uint", fmt.Sprintf("%d", 1)), + attribute.String("attr-string", "string-val"), + attribute.String("attr-default", fmt.Sprintf("%v", other))), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := newResource(tt.config) + assert.Equal(t, tt.wantResource, got) + }) + } +} diff --git a/otelconf/v0.4.0/trace.go b/otelconf/v0.4.0/trace.go new file 
mode 100644 index 00000000000..0e8419bb11f --- /dev/null +++ b/otelconf/v0.4.0/trace.go @@ -0,0 +1,327 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelconf // import "go.opentelemetry.io/contrib/otelconf/v0.3.0" + +import ( + "context" + "errors" + "fmt" + "net/url" + "time" + + "google.golang.org/grpc/credentials" + + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" + "go.opentelemetry.io/otel/sdk/resource" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" +) + +var errInvalidSamplerConfiguration = errors.New("invalid sampler configuration") + +func tracerProvider(cfg configOptions, res *resource.Resource) (trace.TracerProvider, shutdownFunc, error) { + provider, ok := cfg.opentelemetryConfig.TracerProvider.(*TracerProviderJson) + if provider == nil { + return noop.NewTracerProvider(), noopShutdown, nil + } + if !ok { + return noop.NewTracerProvider(), noopShutdown, errors.New("invalid tracer provider") + } + opts := []sdktrace.TracerProviderOption{ + sdktrace.WithResource(res), + } + var errs []error + for _, processor := range provider.Processors { + sp, err := spanProcessor(cfg.ctx, processor) + if err == nil { + opts = append(opts, sdktrace.WithSpanProcessor(sp)) + } else { + errs = append(errs, err) + } + } + if s, err := sampler(provider.Sampler); err == nil { + opts = append(opts, sdktrace.WithSampler(s)) + } else { + errs = append(errs, err) + } + if len(errs) > 0 { + return noop.NewTracerProvider(), noopShutdown, errors.Join(errs...) + } + tp := sdktrace.NewTracerProvider(opts...) + return tp, tp.Shutdown, nil +} + +func parentBasedSampler(s *ParentBasedSampler) (sdktrace.Sampler, error) { + var rootSampler sdktrace.Sampler + var opts []sdktrace.ParentBasedSamplerOption + var errs []error + var err error + + if s.Root == nil { + rootSampler = sdktrace.AlwaysSample() + } else { + rootSampler, err = sampler(s.Root) + if err != nil { + errs = append(errs, err) + } + } + if s.RemoteParentSampled != nil { + remoteParentSampler, err := sampler(s.RemoteParentSampled) + if err != nil { + errs = append(errs, err) + } else { + opts = append(opts, sdktrace.WithRemoteParentSampled(remoteParentSampler)) + } + } + if s.RemoteParentNotSampled != nil { + remoteParentNotSampler, err := sampler(s.RemoteParentNotSampled) + if err != nil { + errs = append(errs, err) + } else { + opts = append(opts, sdktrace.WithRemoteParentNotSampled(remoteParentNotSampler)) + } + } + if s.LocalParentSampled != nil { + localParentSampler, err := sampler(s.LocalParentSampled) + if err != nil { + errs = append(errs, err) + } else { + opts = append(opts, sdktrace.WithLocalParentSampled(localParentSampler)) + } + } + if s.LocalParentNotSampled != nil { + localParentNotSampler, err := sampler(s.LocalParentNotSampled) + if err != nil { + errs = append(errs, err) + } else { + opts = append(opts, sdktrace.WithLocalParentNotSampled(localParentNotSampler)) + } + } + if len(errs) > 0 { + return nil, errors.Join(errs...) + } + return sdktrace.ParentBased(rootSampler, opts...), nil +} + +func sampler(s *Sampler) (sdktrace.Sampler, error) { + if s == nil { + // If omitted, parent based sampler with a root of always_on is used. 
+ return sdktrace.ParentBased(sdktrace.AlwaysSample()), nil + } + if s.ParentBased != nil { + return parentBasedSampler(s.ParentBased) + } + if s.AlwaysOff != nil { + return sdktrace.NeverSample(), nil + } + if s.AlwaysOn != nil { + return sdktrace.AlwaysSample(), nil + } + if s.TraceIDRatioBased != nil { + if s.TraceIDRatioBased.Ratio == nil { + return sdktrace.TraceIDRatioBased(1), nil + } + return sdktrace.TraceIDRatioBased(*s.TraceIDRatioBased.Ratio), nil + } + return nil, errInvalidSamplerConfiguration +} + +func spanExporter(ctx context.Context, exporter SpanExporter) (sdktrace.SpanExporter, error) { + exportersConfigured := 0 + var exportFunc func() (sdktrace.SpanExporter, error) + + if exporter.Console != nil { + exportersConfigured++ + exportFunc = func() (sdktrace.SpanExporter, error) { + return stdouttrace.New( + stdouttrace.WithPrettyPrint(), + ) + } + } + if exporter.OTLPHttp != nil { + exportersConfigured++ + exportFunc = func() (sdktrace.SpanExporter, error) { + return otlpHTTPSpanExporter(ctx, exporter.OTLPHttp) + } + } + if exporter.OTLPGrpc != nil { + exportersConfigured++ + exportFunc = func() (sdktrace.SpanExporter, error) { + return otlpGRPCSpanExporter(ctx, exporter.OTLPGrpc) + } + } + if exporter.OTLPFileDevelopment != nil { + exportersConfigured++ + // TODO: implement file exporter + } + if exporter.Zipkin != nil { + exportersConfigured++ + // TODO: implement zipkin exporter + } + + if exportersConfigured > 1 { + return nil, errors.New("must not specify multiple exporters") + } + + if exportFunc != nil { + return exportFunc() + } + return nil, errors.New("no valid span exporter") +} + +func spanProcessor(ctx context.Context, processor SpanProcessor) (sdktrace.SpanProcessor, error) { + if processor.Batch != nil && processor.Simple != nil { + return nil, errors.New("must not specify multiple span processor type") + } + if processor.Batch != nil { + exp, err := spanExporter(ctx, processor.Batch.Exporter) + if err != nil { + return nil, err + } + return batchSpanProcessor(processor.Batch, exp) + } + if processor.Simple != nil { + exp, err := spanExporter(ctx, processor.Simple.Exporter) + if err != nil { + return nil, err + } + return sdktrace.NewSimpleSpanProcessor(exp), nil + } + return nil, errors.New("unsupported span processor type, must be one of simple or batch") +} + +func otlpGRPCSpanExporter(ctx context.Context, otlpConfig *OTLPGrpcExporter) (sdktrace.SpanExporter, error) { + var opts []otlptracegrpc.Option + + if otlpConfig.Endpoint != nil { + u, err := url.ParseRequestURI(*otlpConfig.Endpoint) + if err != nil { + return nil, err + } + // ParseRequestURI leaves the Host field empty when no + // scheme is specified (i.e. localhost:4317). This check is + // here to support the case where a user may not specify a + // scheme. The code does its best effort here by using + // otlpConfig.Endpoint as-is in that case. 
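+ // For example, "http://localhost:4317" parses with Host "localhost:4317", + // while "localhost:4317" parses as scheme "localhost" with an empty Host, + // in which case the endpoint string is forwarded unchanged.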
+ if u.Host != "" { + opts = append(opts, otlptracegrpc.WithEndpoint(u.Host)) + } else { + opts = append(opts, otlptracegrpc.WithEndpoint(*otlpConfig.Endpoint)) + } + + if u.Scheme == "http" || (u.Scheme != "https" && otlpConfig.Insecure != nil && *otlpConfig.Insecure) { + opts = append(opts, otlptracegrpc.WithInsecure()) + } + } + + if otlpConfig.Compression != nil { + switch *otlpConfig.Compression { + case compressionGzip: + opts = append(opts, otlptracegrpc.WithCompressor(*otlpConfig.Compression)) + case compressionNone: + // none requires no options + default: + return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) + } + } + if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 { + opts = append(opts, otlptracegrpc.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) + } + headersConfig, err := createHeadersConfig(otlpConfig.Headers, otlpConfig.HeadersList) + if err != nil { + return nil, err + } + if len(headersConfig) > 0 { + opts = append(opts, otlptracegrpc.WithHeaders(headersConfig)) + } + + if otlpConfig.CertificateFile != nil || otlpConfig.ClientCertificateFile != nil || otlpConfig.ClientKeyFile != nil { + tlsConfig, err := createTLSConfig(otlpConfig.CertificateFile, otlpConfig.ClientCertificateFile, otlpConfig.ClientKeyFile) + if err != nil { + return nil, err + } + opts = append(opts, otlptracegrpc.WithTLSCredentials(credentials.NewTLS(tlsConfig))) + } + + return otlptracegrpc.New(ctx, opts...) +} + +func otlpHTTPSpanExporter(ctx context.Context, otlpConfig *OTLPHttpExporter) (sdktrace.SpanExporter, error) { + var opts []otlptracehttp.Option + + if otlpConfig.Endpoint != nil { + u, err := url.ParseRequestURI(*otlpConfig.Endpoint) + if err != nil { + return nil, err + } + opts = append(opts, otlptracehttp.WithEndpoint(u.Host)) + + if u.Scheme == "http" { + opts = append(opts, otlptracehttp.WithInsecure()) + } + if len(u.Path) > 0 { + opts = append(opts, otlptracehttp.WithURLPath(u.Path)) + } + } + if otlpConfig.Compression != nil { + switch *otlpConfig.Compression { + case compressionGzip: + opts = append(opts, otlptracehttp.WithCompression(otlptracehttp.GzipCompression)) + case compressionNone: + opts = append(opts, otlptracehttp.WithCompression(otlptracehttp.NoCompression)) + default: + return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) + } + } + if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 { + opts = append(opts, otlptracehttp.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) + } + headersConfig, err := createHeadersConfig(otlpConfig.Headers, otlpConfig.HeadersList) + if err != nil { + return nil, err + } + if len(headersConfig) > 0 { + opts = append(opts, otlptracehttp.WithHeaders(headersConfig)) + } + + tlsConfig, err := createTLSConfig(otlpConfig.CertificateFile, otlpConfig.ClientCertificateFile, otlpConfig.ClientKeyFile) + if err != nil { + return nil, err + } + opts = append(opts, otlptracehttp.WithTLSClientConfig(tlsConfig)) + + return otlptracehttp.New(ctx, opts...) 
+} + +func batchSpanProcessor(bsp *BatchSpanProcessor, exp sdktrace.SpanExporter) (sdktrace.SpanProcessor, error) { + var opts []sdktrace.BatchSpanProcessorOption + if bsp.ExportTimeout != nil { + if *bsp.ExportTimeout < 0 { + return nil, fmt.Errorf("invalid export timeout %d", *bsp.ExportTimeout) + } + opts = append(opts, sdktrace.WithExportTimeout(time.Millisecond*time.Duration(*bsp.ExportTimeout))) + } + if bsp.MaxExportBatchSize != nil { + if *bsp.MaxExportBatchSize < 0 { + return nil, fmt.Errorf("invalid batch size %d", *bsp.MaxExportBatchSize) + } + opts = append(opts, sdktrace.WithMaxExportBatchSize(*bsp.MaxExportBatchSize)) + } + if bsp.MaxQueueSize != nil { + if *bsp.MaxQueueSize < 0 { + return nil, fmt.Errorf("invalid queue size %d", *bsp.MaxQueueSize) + } + opts = append(opts, sdktrace.WithMaxQueueSize(*bsp.MaxQueueSize)) + } + if bsp.ScheduleDelay != nil { + if *bsp.ScheduleDelay < 0 { + return nil, fmt.Errorf("invalid schedule delay %d", *bsp.ScheduleDelay) + } + opts = append(opts, sdktrace.WithBatchTimeout(time.Millisecond*time.Duration(*bsp.ScheduleDelay))) + } + return sdktrace.NewBatchSpanProcessor(exp, opts...), nil +} diff --git a/otelconf/v0.4.0/trace_test.go b/otelconf/v0.4.0/trace_test.go new file mode 100644 index 00000000000..845765ea5eb --- /dev/null +++ b/otelconf/v0.4.0/trace_test.go @@ -0,0 +1,806 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelconf + +import ( + "context" + "errors" + "path/filepath" + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" + "go.opentelemetry.io/otel/sdk/resource" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" +) + +func TestTracerProvider(t *testing.T) { + tests := []struct { + name string + cfg configOptions + wantProvider trace.TracerProvider + wantErr error + }{ + { + name: "no-tracer-provider-configured", + wantProvider: noop.NewTracerProvider(), + }, + { + name: "error-in-config", + cfg: configOptions{ + opentelemetryConfig: OpenTelemetryConfiguration{ + TracerProvider: &TracerProviderJson{ + Processors: []SpanProcessor{ + { + Batch: &BatchSpanProcessor{}, + Simple: &SimpleSpanProcessor{}, + }, + }, + }, + }, + }, + wantProvider: noop.NewTracerProvider(), + wantErr: errors.Join(errors.New("must not specify multiple span processor type")), + }, + { + name: "multiple-errors-in-config", + cfg: configOptions{ + opentelemetryConfig: OpenTelemetryConfiguration{ + TracerProvider: &TracerProviderJson{ + Processors: []SpanProcessor{ + { + Batch: &BatchSpanProcessor{}, + Simple: &SimpleSpanProcessor{}, + }, + { + Simple: &SimpleSpanProcessor{ + Exporter: SpanExporter{ + Console: ConsoleExporter{}, + OTLPGrpc: &OTLPGrpcExporter{}, + }, + }, + }, + }, + }, + }, + }, + wantProvider: noop.NewTracerProvider(), + wantErr: errors.Join(errors.New("must not specify multiple span processor type"), errors.New("must not specify multiple exporters")), + }, + { + name: "invalid-sampler-config", + cfg: configOptions{ + opentelemetryConfig: OpenTelemetryConfiguration{ + TracerProvider: &TracerProviderJson{ + Processors: []SpanProcessor{ + { + Simple: &SimpleSpanProcessor{ + Exporter: SpanExporter{ + Console: ConsoleExporter{}, + }, + }, + }, + }, + Sampler: &Sampler{}, + }, + }, + },
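+ // An empty Sampler{} matches none of the supported sampler types, so + // provider construction is expected to fail with errInvalidSamplerConfiguration.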
+ wantProvider: noop.NewTracerProvider(), + wantErr: errors.Join(errInvalidSamplerConfiguration), + }, + } + for _, tt := range tests { + tp, shutdown, err := tracerProvider(tt.cfg, resource.Default()) + require.Equal(t, tt.wantProvider, tp) + assert.Equal(t, tt.wantErr, err) + require.NoError(t, shutdown(context.Background())) + } +} + +func TestSpanProcessor(t *testing.T) { + consoleExporter, err := stdouttrace.New( + stdouttrace.WithPrettyPrint(), + ) + require.NoError(t, err) + ctx := context.Background() + otlpGRPCExporter, err := otlptracegrpc.New(ctx) + require.NoError(t, err) + otlpHTTPExporter, err := otlptracehttp.New(ctx) + require.NoError(t, err) + testCases := []struct { + name string + processor SpanProcessor + args any + wantErr string + wantProcessor sdktrace.SpanProcessor + }{ + { + name: "no processor", + wantErr: "unsupported span processor type, must be one of simple or batch", + }, + { + name: "multiple processor types", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{}, + }, + Simple: &SimpleSpanProcessor{}, + }, + wantErr: "must not specify multiple span processor type", + }, + { + name: "batch processor invalid exporter", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{}, + }, + }, + wantErr: "no valid span exporter", + }, + { + name: "batch processor invalid batch size console exporter", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(-1), + Exporter: SpanExporter{ + Console: ConsoleExporter{}, + }, + }, + }, + wantErr: "invalid batch size -1", + }, + { + name: "batch processor invalid export timeout console exporter", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + ExportTimeout: ptr(-2), + Exporter: SpanExporter{ + Console: ConsoleExporter{}, + }, + }, + }, + wantErr: "invalid export timeout -2", + }, + { + name: "batch processor invalid queue size console exporter", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxQueueSize: ptr(-3), + Exporter: SpanExporter{ + Console: ConsoleExporter{}, + }, + }, + }, + wantErr: "invalid queue size -3", + }, + { + name: "batch processor invalid schedule delay console exporter", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + ScheduleDelay: ptr(-4), + Exporter: SpanExporter{ + Console: ConsoleExporter{}, + }, + }, + }, + wantErr: "invalid schedule delay -4", + }, + { + name: "batch processor with multiple exporters", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + Console: ConsoleExporter{}, + OTLPGrpc: &OTLPGrpcExporter{}, + }, + }, + }, + wantErr: "must not specify multiple exporters", + }, + { + name: "batch processor console exporter", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + Console: ConsoleExporter{}, + }, + }, + }, + wantProcessor: sdktrace.NewBatchSpanProcessor(consoleExporter), + }, + { + name: "batch/otlp-grpc-exporter-no-endpoint", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdktrace.NewBatchSpanProcessor(otlpGRPCExporter), + }, + { + name: 
"batch/otlp-grpc-exporter", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("http://localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdktrace.NewBatchSpanProcessor(otlpGRPCExporter), + }, + { + name: "batch/otlp-grpc-exporter-socket-endpoint", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("unix:collector.sock"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdktrace.NewBatchSpanProcessor(otlpGRPCExporter), + }, + { + name: "batch/otlp-grpc-good-ca-certificate", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + CertificateFile: ptr(filepath.Join("..", "testdata", "ca.crt")), + }, + }, + }, + }, + wantProcessor: sdktrace.NewBatchSpanProcessor(otlpGRPCExporter), + }, + { + name: "batch/otlp-grpc-bad-ca-certificate", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + CertificateFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + }, + }, + }, + }, + wantErr: "could not create certificate authority chain from certificate", + }, + { + name: "batch/otlp-grpc-bad-client-certificate", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + ClientCertificateFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + ClientKeyFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + }, + }, + }, + }, + wantErr: "could not use client certificate: tls: failed to find any PEM data in certificate input", + }, + { + name: "batch/otlp-grpc-bad-headerslist", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + HeadersList: ptr("==="), + }, + }, + }, + }, + wantErr: "invalid headers list: invalid key: \"\"", + }, + { + name: "batch/otlp-grpc-exporter-no-scheme", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdktrace.NewBatchSpanProcessor(otlpGRPCExporter), + }, + { + name: "batch/otlp-grpc-invalid-endpoint", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + 
Endpoint: ptr(" "), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantErr: "parse \" \": invalid URI for request", + }, + { + name: "batch/otlp-grpc-invalid-compression", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("invalid"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantErr: "unsupported compression \"invalid\"", + }, + { + name: "batch/otlp-http-exporter", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("http://localhost:4318"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-good-ca-certificate", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + CertificateFile: ptr(filepath.Join("..", "testdata", "ca.crt")), + }, + }, + }, + }, + wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-bad-ca-certificate", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + CertificateFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + }, + }, + }, + }, + wantErr: "could not create certificate authority chain from certificate", + }, + { + name: "batch/otlp-http-bad-client-certificate", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + ClientCertificateFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + ClientKeyFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + }, + }, + }, + }, + wantErr: "could not use client certificate: tls: failed to find any PEM data in certificate input", + }, + { + name: "batch/otlp-http-bad-headerslist", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + HeadersList: ptr("==="), + }, + }, + }, + }, + wantErr: "invalid headers list: invalid key: \"\"", + }, + { + name: "batch/otlp-http-exporter-with-path", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("http://localhost:4318/path/123"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter), + }, + { + name: 
"batch/otlp-http-exporter-no-endpoint", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + OTLPHttp: &OTLPHttpExporter{ + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-exporter-no-scheme", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-invalid-endpoint", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr(" "), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantErr: "parse \" \": invalid URI for request", + }, + { + name: "batch/otlp-http-none-compression", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-invalid-compression", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(0), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(0), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("invalid"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantErr: "unsupported compression \"invalid\"", + }, + { + name: "simple/no-exporter", + processor: SpanProcessor{ + Simple: &SimpleSpanProcessor{ + Exporter: SpanExporter{}, + }, + }, + wantErr: "no valid span exporter", + }, + { + name: "simple/console-exporter", + processor: SpanProcessor{ + Simple: &SimpleSpanProcessor{ + Exporter: SpanExporter{ + Console: ConsoleExporter{}, + }, + }, + }, + wantProcessor: sdktrace.NewSimpleSpanProcessor(consoleExporter), + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + got, err := spanProcessor(context.Background(), tt.processor) + if tt.wantErr != "" { + require.Error(t, err) + require.Equal(t, tt.wantErr, err.Error()) + } else { + require.NoError(t, err) + } + if tt.wantProcessor == nil { + require.Nil(t, got) + } else { + require.Equal(t, reflect.TypeOf(tt.wantProcessor), reflect.TypeOf(got)) + var fieldName string + switch reflect.TypeOf(tt.wantProcessor).String() { + case "*trace.simpleSpanProcessor": + fieldName = "exporter" + default: + fieldName = "e" + } + wantExporterType := 
reflect.Indirect(reflect.ValueOf(tt.wantProcessor)).FieldByName(fieldName).Elem().Type() + gotExporterType := reflect.Indirect(reflect.ValueOf(got)).FieldByName(fieldName).Elem().Type() + require.Equal(t, wantExporterType.String(), gotExporterType.String()) + } + }) + } +} + +func TestSampler(t *testing.T) { + for _, tt := range []struct { + name string + sampler *Sampler + wantSampler sdktrace.Sampler + wantError error + }{ + { + name: "no sampler configuration, return default", + sampler: nil, + wantSampler: sdktrace.ParentBased(sdktrace.AlwaysSample()), + }, + { + name: "invalid sampler configuration, return error", + sampler: &Sampler{}, + wantSampler: nil, + wantError: errInvalidSamplerConfiguration, + }, + { + name: "sampler configuration always on", + sampler: &Sampler{ + AlwaysOn: AlwaysOnSampler{}, + }, + wantSampler: sdktrace.AlwaysSample(), + }, + { + name: "sampler configuration always off", + sampler: &Sampler{ + AlwaysOff: AlwaysOffSampler{}, + }, + wantSampler: sdktrace.NeverSample(), + }, + { + name: "sampler configuration trace ID ratio", + sampler: &Sampler{ + TraceIDRatioBased: &TraceIDRatioBasedSampler{ + Ratio: ptr(0.54), + }, + }, + wantSampler: sdktrace.TraceIDRatioBased(0.54), + }, + { + name: "sampler configuration trace ID ratio no ratio", + sampler: &Sampler{ + TraceIDRatioBased: &TraceIDRatioBasedSampler{}, + }, + wantSampler: sdktrace.TraceIDRatioBased(1), + }, + { + name: "sampler configuration parent based no options", + sampler: &Sampler{ + ParentBased: &ParentBasedSampler{}, + }, + wantSampler: sdktrace.ParentBased(sdktrace.AlwaysSample()), + }, + { + name: "sampler configuration parent based many options", + sampler: &Sampler{ + ParentBased: &ParentBasedSampler{ + Root: &Sampler{ + AlwaysOff: AlwaysOffSampler{}, + }, + RemoteParentNotSampled: &Sampler{ + AlwaysOn: AlwaysOnSampler{}, + }, + RemoteParentSampled: &Sampler{ + TraceIDRatioBased: &TraceIDRatioBasedSampler{ + Ratio: ptr(0.009), + }, + }, + LocalParentNotSampled: &Sampler{ + AlwaysOff: AlwaysOffSampler{}, + }, + LocalParentSampled: &Sampler{ + TraceIDRatioBased: &TraceIDRatioBasedSampler{ + Ratio: ptr(0.05), + }, + }, + }, + }, + wantSampler: sdktrace.ParentBased( + sdktrace.NeverSample(), + sdktrace.WithLocalParentNotSampled(sdktrace.NeverSample()), + sdktrace.WithLocalParentSampled(sdktrace.TraceIDRatioBased(0.05)), + sdktrace.WithRemoteParentNotSampled(sdktrace.AlwaysSample()), + sdktrace.WithRemoteParentSampled(sdktrace.TraceIDRatioBased(0.009)), + ), + }, + { + name: "sampler configuration with many errors", + sampler: &Sampler{ + ParentBased: &ParentBasedSampler{ + Root: &Sampler{}, + RemoteParentNotSampled: &Sampler{}, + RemoteParentSampled: &Sampler{}, + LocalParentNotSampled: &Sampler{}, + LocalParentSampled: &Sampler{}, + }, + }, + wantError: errors.Join( + errInvalidSamplerConfiguration, + errInvalidSamplerConfiguration, + errInvalidSamplerConfiguration, + errInvalidSamplerConfiguration, + errInvalidSamplerConfiguration, + ), + }, + } { + t.Run(tt.name, func(t *testing.T) { + got, err := sampler(tt.sampler) + if tt.wantError != nil { + require.Error(t, err) + require.EqualError(t, err, tt.wantError.Error()) + } else { + require.NoError(t, err) + } + + require.Equal(t, tt.wantSampler, got) + }) + } +}
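A minimal usage sketch (illustrative, not part of this diff): it assumes the module import path go.opentelemetry.io/contrib/otelconf/v0.4.0 added above and a declarative configuration file named config.yaml (a placeholder) that follows the opentelemetry-configuration v0.4.0 schema. It shows how ParseYAML and NewSDK reach the tracerProvider, sampler, and span processor code introduced in trace.go.

    package main

    import (
    	"context"
    	"log"
    	"os"

    	otelconf "go.opentelemetry.io/contrib/otelconf/v0.4.0"
    )

    func main() {
    	// Load a declarative configuration file (placeholder path).
    	b, err := os.ReadFile("config.yaml")
    	if err != nil {
    		log.Fatal(err)
    	}
    	cfg, err := otelconf.ParseYAML(b)
    	if err != nil {
    		log.Fatal(err)
    	}

    	ctx := context.Background()
    	sdk, err := otelconf.NewSDK(
    		otelconf.WithContext(ctx),
    		otelconf.WithOpenTelemetryConfiguration(*cfg),
    	)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Shutdown flushes and closes every configured processor and exporter.
    	defer func() { _ = sdk.Shutdown(ctx) }()

    	// The provider returned here is built by tracerProvider above, including
    	// any sampler and span processors taken from the configuration.
    	tracer := sdk.TracerProvider().Tracer("example")
    	_, span := tracer.Start(ctx, "startup")
    	span.End()
    }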