4 changes: 2 additions & 2 deletions exporter/alertmanagerexporter/alertmanager_exporter.go
@@ -189,7 +189,7 @@ func (s *alertmanagerExporter) pushTraces(ctx context.Context, td ptrace.Traces)
}

func (s *alertmanagerExporter) start(ctx context.Context, host component.Host) error {
- client, err := s.config.ClientConfig.ToClient(ctx, host, s.settings)
+ client, err := s.config.ToClient(ctx, host, s.settings)
if err != nil {
return fmt.Errorf("failed to create HTTP Client: %w", err)
}
@@ -209,7 +209,7 @@ func newAlertManagerExporter(cfg *Config, set component.TelemetrySettings) *aler
config: cfg,
settings: set,
tracesMarshaler: &ptrace.JSONMarshaler{},
- endpoint: fmt.Sprintf("%s/api/%s/alerts", cfg.ClientConfig.Endpoint, cfg.APIVersion),
+ endpoint: fmt.Sprintf("%s/api/%s/alerts", cfg.Endpoint, cfg.APIVersion),
generatorURL: cfg.GeneratorURL,
defaultSeverity: cfg.DefaultSeverity,
severityAttribute: cfg.SeverityAttribute,
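Note: this change works because of Go's embedded-field promotion. Since both cfg.ClientConfig.Endpoint and cfg.Endpoint compile, the Config struct must embed confighttp.ClientConfig, making its fields reachable both through the explicit cfg.ClientConfig qualifier and directly on the outer value; dropping the qualifier is purely cosmetic. The same promotion explains the later conf.Encoding.Name to conf.Name, c.ClientConfig.Headers to c.Headers, and set.TelemetrySettings.Logger to set.Logger changes. A minimal sketch, with illustrative stand-in types rather than the collector's real definitions:

package main

import "fmt"

// ClientConfig is a stand-in for confighttp.ClientConfig.
type ClientConfig struct {
	Endpoint string
}

// Config embeds ClientConfig; the embedded field's name is its type
// name, so cfg.ClientConfig and the promoted cfg.Endpoint both compile.
type Config struct {
	ClientConfig
}

func main() {
	cfg := Config{ClientConfig: ClientConfig{Endpoint: "http://localhost:9093"}}
	fmt.Println(cfg.ClientConfig.Endpoint == cfg.Endpoint) // true
}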
2 changes: 1 addition & 1 deletion exporter/alertmanagerexporter/config.go
@@ -30,7 +30,7 @@ var _ component.Config = (*Config)(nil)

// Validate checks if the exporter configuration is valid
func (cfg *Config) Validate() error {
- if cfg.ClientConfig.Endpoint == "" {
+ if cfg.Endpoint == "" {
return errors.New("endpoint must be non-empty")
}
if cfg.DefaultSeverity == "" {
2 changes: 1 addition & 1 deletion exporter/alertmanagerexporter/config_test.go
@@ -112,7 +112,7 @@ func TestConfig_Validate(t *testing.T) {
name: "NoEndpoint",
cfg: func() *Config {
cfg := createDefaultConfig().(*Config)
- cfg.ClientConfig.Endpoint = ""
+ cfg.Endpoint = ""
return cfg
}(),
wantErr: "endpoint must be non-empty",
16 changes: 8 additions & 8 deletions exporter/awsemfexporter/datapoint.go
@@ -138,7 +138,7 @@ func (split *dataPointSplit) appendMetricData(metricVal float64, count uint64) {

// CalculateDeltaDatapoints retrieves the NumberDataPoint at the given index and performs rate/delta calculation if necessary.
func (dps numberDataPointSlice) CalculateDeltaDatapoints(i int, instrumentationScopeName string, _ bool, calculators *emfCalculators) ([]dataPoint, bool) {
- metric := dps.NumberDataPointSlice.At(i)
+ metric := dps.At(i)
labels := createLabels(metric.Attributes(), instrumentationScopeName)
timestampMs := unixNanoToMilliseconds(metric.Timestamp())

@@ -177,7 +177,7 @@ func (dps numberDataPointSlice) CalculateDeltaDatapoints(i int, instrumentationS
}

func (dps numberDataPointSlice) IsStaleNaNInf(i int) (bool, pcommon.Map) {
- metric := dps.NumberDataPointSlice.At(i)
+ metric := dps.At(i)
if metric.Flags().NoRecordedValue() {
return true, metric.Attributes()
}
@@ -189,7 +189,7 @@ func (dps numberDataPointSlice) IsStaleNaNInf(i int) (bool, pcommon.Map) {

// CalculateDeltaDatapoints retrieves the HistogramDataPoint at the given index.
func (dps histogramDataPointSlice) CalculateDeltaDatapoints(i int, instrumentationScopeName string, _ bool, _ *emfCalculators) ([]dataPoint, bool) {
- metric := dps.HistogramDataPointSlice.At(i)
+ metric := dps.At(i)
labels := createLabels(metric.Attributes(), instrumentationScopeName)
timestamp := unixNanoToMilliseconds(metric.Timestamp())

@@ -207,7 +207,7 @@ func (dps histogramDataPointSlice) CalculateDeltaDatapoints(i int, instrumentati
}

func (dps histogramDataPointSlice) IsStaleNaNInf(i int) (bool, pcommon.Map) {
- metric := dps.HistogramDataPointSlice.At(i)
+ metric := dps.At(i)
if metric.Flags().NoRecordedValue() {
return true, metric.Attributes()
}
@@ -229,7 +229,7 @@ func (dps histogramDataPointSlice) IsStaleNaNInf(i int) (bool, pcommon.Map) {
// - Sum is only assigned to the first split to ensure the total sum of the datapoints after aggregation is correct.
// - Count is accumulated based on the bucket counts within each split.
func (dps exponentialHistogramDataPointSlice) CalculateDeltaDatapoints(idx int, instrumentationScopeName string, _ bool, _ *emfCalculators) ([]dataPoint, bool) {
- metric := dps.ExponentialHistogramDataPointSlice.At(idx)
+ metric := dps.At(idx)

const splitThreshold = 100
currentBucketIndex := 0
@@ -412,7 +412,7 @@ func collectDatapointsWithNegativeBuckets(split *dataPointSplit, metric pmetric.
}

func (dps exponentialHistogramDataPointSlice) IsStaleNaNInf(i int) (bool, pcommon.Map) {
- metric := dps.ExponentialHistogramDataPointSlice.At(i)
+ metric := dps.At(i)
if metric.Flags().NoRecordedValue() {
return true, metric.Attributes()
}
@@ -430,7 +430,7 @@ func (dps exponentialHistogramDataPointSlice) IsStaleNaNInf(i int) (bool, pcommo

// CalculateDeltaDatapoints retrieves the SummaryDataPoint at the given index and perform calculation with sum and count while retain the quantile value.
func (dps summaryDataPointSlice) CalculateDeltaDatapoints(i int, instrumentationScopeName string, detailedMetrics bool, calculators *emfCalculators) ([]dataPoint, bool) {
- metric := dps.SummaryDataPointSlice.At(i)
+ metric := dps.At(i)
labels := createLabels(metric.Attributes(), instrumentationScopeName)
timestampMs := unixNanoToMilliseconds(metric.Timestamp())

@@ -485,7 +485,7 @@ func (dps summaryDataPointSlice) CalculateDeltaDatapoints(i int, instrumentation
}

func (dps summaryDataPointSlice) IsStaleNaNInf(i int) (bool, pcommon.Map) {
- metric := dps.SummaryDataPointSlice.At(i)
+ metric := dps.At(i)
if metric.Flags().NoRecordedValue() {
return true, metric.Attributes()
}
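Note: the same promotion applies to methods. The receiver types here (numberDataPointSlice, histogramDataPointSlice, and so on) embed the corresponding pmetric slice, so dps.NumberDataPointSlice.At(i) and dps.At(i) invoke the same method. A sketch with a hypothetical stand-in for the pmetric slice type:

package main

import "fmt"

// pointSlice is a stand-in for a pmetric data-point slice.
type pointSlice struct{ points []float64 }

func (s pointSlice) At(i int) float64 { return s.points[i] }

// numberDataPointSlice mirrors the wrapper shape in this file:
// embedding pointSlice promotes its At method onto the wrapper.
type numberDataPointSlice struct {
	pointSlice
}

func main() {
	dps := numberDataPointSlice{pointSlice{points: []float64{0.1, 0.2}}}
	// Qualified and promoted calls dispatch to the same method.
	fmt.Println(dps.pointSlice.At(0) == dps.At(0)) // true
}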
8 changes: 4 additions & 4 deletions exporter/awsemfexporter/datapoint_test.go
@@ -2052,7 +2052,7 @@ func TestGetDataPoints(t *testing.T) {
expectedDPS := tc.expectedDatapointSlice.(numberDataPointSlice)
assert.Equal(t, expectedDPS.deltaMetricMetadata, convertedDPS.deltaMetricMetadata)
assert.Equal(t, 1, convertedDPS.Len())
- dp := convertedDPS.NumberDataPointSlice.At(0)
+ dp := convertedDPS.At(0)
switch dp.ValueType() {
case pmetric.NumberDataPointValueTypeDouble:
assert.Equal(t, 0.1, dp.DoubleValue())
@@ -2062,14 +2062,14 @@
assert.Equal(t, tc.expectedAttributes, dp.Attributes().AsRaw())
case histogramDataPointSlice:
assert.Equal(t, 1, convertedDPS.Len())
- dp := convertedDPS.HistogramDataPointSlice.At(0)
+ dp := convertedDPS.At(0)
assert.Equal(t, 35.0, dp.Sum())
assert.Equal(t, uint64(18), dp.Count())
assert.Equal(t, []float64{0, 10}, dp.ExplicitBounds().AsRaw())
assert.Equal(t, tc.expectedAttributes, dp.Attributes().AsRaw())
case exponentialHistogramDataPointSlice:
assert.Equal(t, 1, convertedDPS.Len())
- dp := convertedDPS.ExponentialHistogramDataPointSlice.At(0)
+ dp := convertedDPS.At(0)
assert.Equal(t, float64(0), dp.Sum())
assert.Equal(t, uint64(4), dp.Count())
assert.Equal(t, []uint64{1, 0, 1}, dp.Positive().BucketCounts().AsRaw())
Expand All @@ -2080,7 +2080,7 @@ func TestGetDataPoints(t *testing.T) {
expectedDPS := tc.expectedDatapointSlice.(summaryDataPointSlice)
assert.Equal(t, expectedDPS.deltaMetricMetadata, convertedDPS.deltaMetricMetadata)
assert.Equal(t, 1, convertedDPS.Len())
- dp := convertedDPS.SummaryDataPointSlice.At(0)
+ dp := convertedDPS.At(0)
assert.Equal(t, 15.0, dp.Sum())
assert.Equal(t, uint64(5), dp.Count())
assert.Equal(t, 2, dp.QuantileValues().Len())
2 changes: 1 addition & 1 deletion exporter/awsemfexporter/grouped_metric.go
@@ -86,7 +86,7 @@ func addToGroupedMetric(
}

// Extra params to use when grouping metrics
- metadata.groupedMetricMetadata.batchIndex = i
+ metadata.batchIndex = i
groupKey := aws.NewKey(metadata.groupedMetricMetadata, labels)
if _, ok := groupedMetrics[groupKey]; ok {
// if MetricName already exists in metrics map, print warning log
11 changes: 6 additions & 5 deletions exporter/awsemfexporter/grouped_metric_test.go
@@ -309,9 +309,10 @@ func TestAddToGroupedMetric(t *testing.T) {
}
assert.Equal(t, expectedLabels, group.labels)

- if group.metadata.logGroup == "log-group-2" {
+ switch group.metadata.logGroup {
+ case "log-group-2":
seenLogGroup2 = true
- } else if group.metadata.logGroup == "log-group-1" {
+ case "log-group-1":
seenLogGroup1 = true
}
}
@@ -441,10 +441,10 @@
for _, v := range groupedMetrics {
assert.Len(t, v.metrics, 1)
assert.Len(t, v.labels, 2)
- assert.Contains(t, expectedMetadata, v.metadata.groupedMetricMetadata.batchIndex)
- assert.Equal(t, expectedMetadata[v.metadata.groupedMetricMetadata.batchIndex], v.metadata)
+ assert.Contains(t, expectedMetadata, v.metadata.batchIndex)
+ assert.Equal(t, expectedMetadata[v.metadata.batchIndex], v.metadata)
assert.Equal(t, expectedLabels, v.labels)
- delete(expectedMetadata, v.metadata.groupedMetricMetadata.batchIndex)
+ delete(expectedMetadata, v.metadata.batchIndex)
}
})
}
8 changes: 4 additions & 4 deletions exporter/awsemfexporter/metric_translator_test.go
@@ -348,16 +348,16 @@ func TestTranslateOtToGroupedMetric(t *testing.T) {

for _, v := range groupedMetrics {
assert.Equal(t, tc.expectedNamespace, v.metadata.namespace)
- switch {
- case v.metadata.metricDataType == pmetric.MetricTypeSum:
+ switch v.metadata.metricDataType {
+ case pmetric.MetricTypeSum:
assert.Len(t, v.metrics, 2)
assert.Equal(t, tc.counterLabels, v.labels)
assert.Equal(t, counterSumMetrics, v.metrics)
- case v.metadata.metricDataType == pmetric.MetricTypeGauge:
+ case pmetric.MetricTypeGauge:
assert.Len(t, v.metrics, 2)
assert.Equal(t, tc.counterLabels, v.labels)
assert.Equal(t, counterGaugeMetrics, v.metrics)
- case v.metadata.metricDataType == pmetric.MetricTypeHistogram:
+ case pmetric.MetricTypeHistogram:
assert.Len(t, v.metrics, 1)
assert.Equal(t, tc.timerLabels, v.labels)
assert.Equal(t, timerMetrics, v.metrics)
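Note: this refactor (like the grouped_metric_test.go one above and the azuremonitorexporter one below) converts an if/else chain or a tagless switch of comparisons into a tagged switch, which names the value under test once and lets each case list one or more matched constants. A small self-contained illustration with hypothetical names:

package main

import "fmt"

type metricKind int

const (
	kindSum metricKind = iota
	kindGauge
	kindHistogram
)

func describe(k metricKind) string {
	// Tagged switch: the tested value appears once, and each case
	// lists the constants it matches, several per case if needed.
	switch k {
	case kindSum, kindGauge:
		return "scalar"
	case kindHistogram:
		return "distribution"
	default:
		return "unknown"
	}
}

func main() {
	fmt.Println(describe(kindSum), describe(kindHistogram)) // scalar distribution
}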
4 changes: 2 additions & 2 deletions exporter/awskinesisexporter/exporter.go
@@ -90,7 +90,7 @@ func createExporter(ctx context.Context, c component.Config, log *zap.Logger, op
}

encoder, err := batch.NewEncoder(
- conf.Encoding.Name,
+ conf.Name,
batch.WithMaxRecordSize(conf.MaxRecordSize),
batch.WithMaxRecordsPerBatch(conf.MaxRecordsPerBatch),
batch.WithCompressionType(conf.Compression),
@@ -99,7 +99,7 @@
return nil, err
}

- if conf.Encoding.Name == "otlp_json" {
+ if conf.Name == "otlp_json" {
log.Info("otlp_json is considered experimental and should not be used in a production environment")
}

2 changes: 1 addition & 1 deletion exporter/awss3exporter/sumo_marshaler.go
@@ -32,7 +32,7 @@ func newSumoICMarshaler() sumoMarshaler {
}

func logEntry(buf *bytes.Buffer, format string, a ...any) {
- buf.WriteString(fmt.Sprintf(format, a...))
+ fmt.Fprintf(buf, format, a...)
buf.WriteString("\n")
}

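Note: fmt.Fprintf formats directly into any io.Writer, and *bytes.Buffer satisfies that interface, so the rewrite avoids the intermediate string that buf.WriteString(fmt.Sprintf(...)) allocates first. A minimal runnable sketch:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	var buf bytes.Buffer

	// Old shape: Sprintf builds a string, WriteString copies it in.
	buf.WriteString(fmt.Sprintf("level=%s msg=%q\n", "warn", "old form"))

	// New shape: Fprintf formats straight into the writer,
	// skipping the intermediate allocation.
	fmt.Fprintf(&buf, "level=%s msg=%q\n", "info", "new form")

	fmt.Print(buf.String())
}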
4 changes: 2 additions & 2 deletions exporter/awsxrayexporter/internal/translator/cause_test.go
@@ -83,8 +83,8 @@ func TestMakeCauseAwsSdkSpan(t *testing.T) {
assert.False(t, isThrottle)
assert.NotNil(t, cause)

- assert.Len(t, cause.CauseObject.Exceptions, 1)
- exception := cause.CauseObject.Exceptions[0]
+ assert.Len(t, cause.Exceptions, 1)
+ exception := cause.Exceptions[0]
assert.Equal(t, AwsIndividualHTTPErrorEventType, *exception.Type)
assert.True(t, *exception.Remote)

4 changes: 2 additions & 2 deletions exporter/awsxrayexporter/internal/translator/http.go
@@ -198,7 +198,7 @@ func constructClientURL(urlParts map[string]string) string {
}
}
url = scheme + "://" + host
- if len(port) > 0 && !(scheme == "http" && port == "80") && !(scheme == "https" && port == "443") {
+ if len(port) > 0 && (scheme != "http" || port != "80") && (scheme != "https" || port != "443") {
url += ":" + port
}
target, ok := urlParts[conventionsv112.AttributeHTTPTarget]
@@ -246,7 +246,7 @@ func constructServerURL(urlParts map[string]string) string {
}
}
url = scheme + "://" + host
- if len(port) > 0 && !(scheme == "http" && port == "80") && !(scheme == "https" && port == "443") {
+ if len(port) > 0 && (scheme != "http" || port != "80") && (scheme != "https" || port != "443") {
url += ":" + port
}
target, ok := urlParts[conventionsv112.AttributeHTTPTarget]
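Note: this rewrite (repeated in the azuredataexplorerexporter config and the coralogixexporter clients below) is De Morgan's law: !(a && b) is equivalent to !a || !b, so !(scheme == "http" && port == "80") becomes scheme != "http" || port != "80" with no behavior change. A quick exhaustive check of the identity:

package main

import "fmt"

func main() {
	// Verify !(a && b) == (!a || !b) over all boolean inputs,
	// the identity behind the rewritten scheme/port conditions.
	for _, a := range []bool{false, true} {
		for _, b := range []bool{false, true} {
			lhs := !(a && b)
			rhs := !a || !b
			fmt.Printf("a=%-5v b=%-5v lhs=%-5v rhs=%-5v equal=%v\n",
				a, b, lhs, rhs, lhs == rhs)
		}
	}
}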
2 changes: 1 addition & 1 deletion exporter/awsxrayexporter/internal/translator/segment.go
@@ -169,7 +169,7 @@ func MakeDependencySubsegmentForLocalRootDependencySpan(span ptrace.Span, resour

func MakeServiceSegmentForLocalRootDependencySpan(span ptrace.Span, resource pcommon.Resource, indexedAttrs []string, indexAllAttrs bool, logGroupNames []string, skipTimestampValidation bool, serviceSegmentID pcommon.SpanID) (*awsxray.Segment, error) {
// We always create a segment for the service
- var serviceSpan ptrace.Span = ptrace.NewSpan()
+ serviceSpan := ptrace.NewSpan()
span.CopyTo(serviceSpan)

// Set the span id to the one internally generated
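Note: var serviceSpan ptrace.Span = ptrace.NewSpan() restates a type the compiler already infers, so the short := form is the idiomatic replacement when the declared type exactly matches the initializer (the azuremonitorexporter test below gets the same treatment). A trivial example:

package main

import "fmt"

func main() {
	// Equivalent declarations; the second is preferred when the
	// explicit type adds no information.
	var a string = fmt.Sprintf("%d-%d", 1, 2)
	b := fmt.Sprintf("%d-%d", 1, 2)
	fmt.Println(a == b) // true
}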
2 changes: 1 addition & 1 deletion exporter/azuredataexplorerexporter/config.go
@@ -66,7 +66,7 @@ func (adxCfg *Config) Validate() error {
return errors.New(`either ["application_id" , "application_key" , "tenant_id"] or ["managed_identity_id"] or ["use_azure_auth"] must be provided for auth`)
}

- if !(adxCfg.IngestionType == managedIngestType || adxCfg.IngestionType == queuedIngestTest || isEmpty(adxCfg.IngestionType)) {
+ if adxCfg.IngestionType != managedIngestType && adxCfg.IngestionType != queuedIngestTest && !isEmpty(adxCfg.IngestionType) {
return fmt.Errorf("unsupported configuration for ingestion_type. Accepted types [%s, %s] Provided [%s]", managedIngestType, queuedIngestTest, adxCfg.IngestionType)
}
// Validate managed identity ID. Use system for system assigned managed identity or UserManagedIdentityID (objectID) for user assigned managed identity
2 changes: 1 addition & 1 deletion exporter/azuremonitorexporter/metricexporter_test.go
@@ -107,7 +107,7 @@ func TestSummaryEnvelopes(t *testing.T) {
}

func getDataPoint(tb testing.TB, metric pmetric.Metric) *contracts.DataPoint {
- var envelopes []*contracts.Envelope = getMetricPacker().MetricToEnvelopes(metric, getResource(), getScope())
+ envelopes := getMetricPacker().MetricToEnvelopes(metric, getResource(), getScope())
require.Len(tb, envelopes, 1)
envelope := envelopes[0]
require.NotNil(tb, envelope)
5 changes: 3 additions & 2 deletions exporter/azuremonitorexporter/trace_to_envelope.go
@@ -85,15 +85,16 @@ func spanToEnvelopes(
envelope.Tags[contracts.UserId] = userID.Str()
}

- if spanKind == ptrace.SpanKindServer || spanKind == ptrace.SpanKindConsumer {
+ switch spanKind {
+ case ptrace.SpanKindServer, ptrace.SpanKindConsumer:
requestData := spanToRequestData(span, incomingSpanType)
dataProperties = requestData.Properties
dataSanitizeFunc = requestData.Sanitize
envelope.Name = requestData.EnvelopeName("")
envelope.Tags[contracts.OperationName] = requestData.Name
data.BaseData = requestData
data.BaseType = requestData.BaseType()
- } else if spanKind == ptrace.SpanKindClient || spanKind == ptrace.SpanKindProducer || spanKind == ptrace.SpanKindInternal {
+ case ptrace.SpanKindClient, ptrace.SpanKindProducer, ptrace.SpanKindInternal:
remoteDependencyData := spanToRemoteDependencyData(span, incomingSpanType)

// Regardless of the detected Span type, if the SpanKind is Internal we need to set data.Type to InProc
8 changes: 4 additions & 4 deletions exporter/coralogixexporter/config.go
@@ -89,11 +89,11 @@ func (c *Config) Validate() error {
}

// check if headers exists
- if len(c.ClientConfig.Headers) == 0 {
- c.ClientConfig.Headers = make(map[string]configopaque.String)
+ if len(c.Headers) == 0 {
+ c.Headers = make(map[string]configopaque.String)
}
c.ClientConfig.Headers["ACCESS_TOKEN"] = c.PrivateKey
c.ClientConfig.Headers["appName"] = configopaque.String(c.AppName)
c.Headers["ACCESS_TOKEN"] = c.PrivateKey
c.Headers["appName"] = configopaque.String(c.AppName)
return nil
}

2 changes: 1 addition & 1 deletion exporter/coralogixexporter/logs_client.go
@@ -94,7 +94,7 @@ func (e *logsExporter) pushLogs(ctx context.Context, ld plog.Logs) error {
}

partialSuccess := resp.PartialSuccess()
- if !(partialSuccess.ErrorMessage() == "" && partialSuccess.RejectedLogRecords() == 0) {
+ if partialSuccess.ErrorMessage() != "" || partialSuccess.RejectedLogRecords() != 0 {
e.settings.Logger.Error("Partial success response from Coralogix",
zap.String("message", partialSuccess.ErrorMessage()),
zap.Int64("rejected_log_records", partialSuccess.RejectedLogRecords()),
2 changes: 1 addition & 1 deletion exporter/coralogixexporter/metrics_client.go
@@ -92,7 +92,7 @@ func (e *metricsExporter) pushMetrics(ctx context.Context, md pmetric.Metrics) e
}

partialSuccess := resp.PartialSuccess()
- if !(partialSuccess.ErrorMessage() == "" && partialSuccess.RejectedDataPoints() == 0) {
+ if partialSuccess.ErrorMessage() != "" || partialSuccess.RejectedDataPoints() != 0 {
e.settings.Logger.Error("Partial success response from Coralogix",
zap.String("message", partialSuccess.ErrorMessage()),
zap.Int64("rejected_data_points", partialSuccess.RejectedDataPoints()),
2 changes: 1 addition & 1 deletion exporter/coralogixexporter/profiles_client.go
@@ -91,7 +91,7 @@ func (e *profilesExporter) pushProfiles(ctx context.Context, md pprofile.Profile
}

partialSuccess := resp.PartialSuccess()
- if !(partialSuccess.ErrorMessage() == "" && partialSuccess.RejectedProfiles() == 0) {
+ if partialSuccess.ErrorMessage() != "" || partialSuccess.RejectedProfiles() != 0 {
e.settings.Logger.Error("Partial success response from Coralogix",
zap.String("message", partialSuccess.ErrorMessage()),
zap.Int64("rejected_profiles", partialSuccess.RejectedProfiles()),
2 changes: 1 addition & 1 deletion exporter/coralogixexporter/traces_client.go
@@ -89,7 +89,7 @@ func (e *tracesExporter) pushTraces(ctx context.Context, td ptrace.Traces) error
}

partialSuccess := resp.PartialSuccess()
- if !(partialSuccess.ErrorMessage() == "" && partialSuccess.RejectedSpans() == 0) {
+ if partialSuccess.ErrorMessage() != "" || partialSuccess.RejectedSpans() != 0 {
e.settings.Logger.Error("Partial success response from Coralogix",
zap.String("message", partialSuccess.ErrorMessage()),
zap.Int64("rejected_spans", partialSuccess.RejectedSpans()),
2 changes: 1 addition & 1 deletion exporter/datadogexporter/agent_components.go
@@ -37,7 +37,7 @@ func newConfigComponent(set component.TelemetrySettings, cfg *datadogconfig.Conf
pkgconfig.Set("logs_config.batch_wait", cfg.Logs.BatchWait, pkgconfigmodel.SourceFile)
pkgconfig.Set("logs_config.use_compression", cfg.Logs.UseCompression, pkgconfigmodel.SourceFile)
pkgconfig.Set("logs_config.compression_level", cfg.Logs.CompressionLevel, pkgconfigmodel.SourceFile)
pkgconfig.Set("logs_config.logs_dd_url", cfg.Logs.TCPAddrConfig.Endpoint, pkgconfigmodel.SourceFile)
pkgconfig.Set("logs_config.logs_dd_url", cfg.Logs.Endpoint, pkgconfigmodel.SourceFile)
pkgconfig.Set("logs_config.auditor_ttl", pkgconfigsetup.DefaultAuditorTTL, pkgconfigmodel.SourceDefault)
pkgconfig.Set("logs_config.batch_max_content_size", pkgconfigsetup.DefaultBatchMaxContentSize, pkgconfigmodel.SourceDefault)
pkgconfig.Set("logs_config.batch_max_size", pkgconfigsetup.DefaultBatchMaxSize, pkgconfigmodel.SourceDefault)
6 changes: 3 additions & 3 deletions exporter/datadogexporter/factory.go
@@ -251,7 +251,7 @@ func (f *factory) createMetricsExporter(
set exporter.Settings,
c component.Config,
) (exporter.Metrics, error) {
- cfg := checkAndCastConfig(c, set.TelemetrySettings.Logger)
+ cfg := checkAndCastConfig(c, set.Logger)
hostProvider, err := f.SourceProvider(set.TelemetrySettings, cfg.Hostname, cfg.HostMetadata.GetSourceTimeout())
if err != nil {
return nil, fmt.Errorf("failed to build hostname provider: %w", err)
@@ -404,7 +404,7 @@ func (f *factory) createTracesExporter(
set exporter.Settings,
c component.Config,
) (exporter.Traces, error) {
- cfg := checkAndCastConfig(c, set.TelemetrySettings.Logger)
+ cfg := checkAndCastConfig(c, set.Logger)
if noAPMStatsFeatureGate.IsEnabled() {
set.Logger.Info(
"Trace metrics are now disabled in the Datadog Exporter by default. To continue receiving Trace Metrics, configure the Datadog Connector or disable the feature gate.",
@@ -506,7 +506,7 @@ func (f *factory) createLogsExporter(
set exporter.Settings,
c component.Config,
) (exporter.Logs, error) {
- cfg := checkAndCastConfig(c, set.TelemetrySettings.Logger)
+ cfg := checkAndCastConfig(c, set.Logger)

if cfg.Logs.DumpPayloads && isLogsAgentExporterEnabled() {
set.Logger.Warn("logs::dump_payloads is not valid when the exporter.datadogexporter.UseLogsAgentExporter feature gate is enabled")