From 1ee197e462d4ccc2f53448ce03a02f793859c07e Mon Sep 17 00:00:00 2001 From: sreenathv Date: Mon, 3 Nov 2025 18:21:05 +0000 Subject: [PATCH 01/41] orcl collection window --- receiver/oracledbreceiver/config.go | 14 ++++++++++-- receiver/oracledbreceiver/scraper.go | 32 +++++++++++++++++----------- 2 files changed, 32 insertions(+), 14 deletions(-) diff --git a/receiver/oracledbreceiver/config.go b/receiver/oracledbreceiver/config.go index cb59804bf5ec5..02c0adf7f40f4 100644 --- a/receiver/oracledbreceiver/config.go +++ b/receiver/oracledbreceiver/config.go @@ -9,6 +9,7 @@ import ( "net" "net/url" "strconv" + "time" "go.opentelemetry.io/collector/scraper/scraperhelper" "go.uber.org/multierr" @@ -29,8 +30,10 @@ var ( ) type TopQueryCollection struct { - MaxQuerySampleCount uint `mapstructure:"max_query_sample_count"` - TopQueryCount uint `mapstructure:"top_query_count"` + MaxQuerySampleCount uint `mapstructure:"max_query_sample_count"` + TopQueryCount uint `mapstructure:"top_query_count"` + CollectionInterval time.Duration `mapstructure:"collection_interval"` + LookbackTime time.Duration `mapstructure:"lookback_time"` } type QuerySample struct { @@ -106,3 +109,10 @@ func (c Config) Validate() error { } return allErrs } + +func (cfg *Config) EffectiveLookbackTime() time.Duration { + if cfg.LookbackTime <= 0 { + return 2 * cfg.ControllerConfig.CollectionInterval + } + return cfg.LookbackTime +} diff --git a/receiver/oracledbreceiver/scraper.go b/receiver/oracledbreceiver/scraper.go index 926e723e1ea8f..bc7f4dd5b24b9 100644 --- a/receiver/oracledbreceiver/scraper.go +++ b/receiver/oracledbreceiver/scraper.go @@ -133,19 +133,21 @@ type oracleScraper struct { obfuscator *obfuscator querySampleCfg QuerySample serviceInstanceID string + lastExecutionTimestamp time.Time } func newScraper(metricsBuilder *metadata.MetricsBuilder, metricsBuilderConfig metadata.MetricsBuilderConfig, scrapeCfg scraperhelper.ControllerConfig, logger *zap.Logger, providerFunc dbProviderFunc, 
clientProviderFunc clientProviderFunc, instanceName, hostName string) (scraper.Metrics, error) { s := &oracleScraper{ - mb: metricsBuilder, - metricsBuilderConfig: metricsBuilderConfig, - scrapeCfg: scrapeCfg, - logger: logger, - dbProviderFunc: providerFunc, - clientProviderFunc: clientProviderFunc, - instanceName: instanceName, - hostName: hostName, - serviceInstanceID: getInstanceID(instanceName, logger), + mb: metricsBuilder, + metricsBuilderConfig: metricsBuilderConfig, + scrapeCfg: scrapeCfg, + logger: logger, + dbProviderFunc: providerFunc, + clientProviderFunc: clientProviderFunc, + instanceName: instanceName, + hostName: hostName, + serviceInstanceID: getInstanceID(instanceName, logger), + lastExecutionTimestamp: time.Unix(0, 0), } return scraper.NewMetrics(s.scrape, scraper.WithShutdown(s.shutdown), scraper.WithStart(s.start)) } @@ -529,9 +531,11 @@ func (s *oracleScraper) scrapeLogs(ctx context.Context) (plog.Logs, error) { var scrapeErrors []error if s.logsBuilderConfig.Events.DbServerTopQuery.Enabled { - topNCollectionErrors := s.collectTopNMetricData(ctx, logs) - if topNCollectionErrors != nil { - scrapeErrors = append(scrapeErrors, topNCollectionErrors) + if s.isTopNMetricsCollectionDue() { + topNCollectionErrors := s.collectTopNMetricData(ctx, logs) + if topNCollectionErrors != nil { + scrapeErrors = append(scrapeErrors, topNCollectionErrors) + } } } @@ -545,6 +549,10 @@ func (s *oracleScraper) scrapeLogs(ctx context.Context) (plog.Logs, error) { return logs, errors.Join(scrapeErrors...) 
} +func (s *oracleScraper) isTopNMetricsCollectionDue() bool { + +} + func (s *oracleScraper) collectTopNMetricData(ctx context.Context, logs plog.Logs) error { var errs []error // get metrics and query texts from DB From 4d7c3594eb27bd6c20b87f13389cb8ea5f5beb6c Mon Sep 17 00:00:00 2001 From: sreenathv Date: Tue, 11 Nov 2025 09:21:57 +0000 Subject: [PATCH 02/41] Oracle topN collection interval implementation --- receiver/oracledbreceiver/config.go | 8 --- receiver/oracledbreceiver/factory.go | 1 + receiver/oracledbreceiver/scraper.go | 57 ++++++++++++------- .../oracleQueryMetricsAndTextSql.tmpl | 6 +- 4 files changed, 41 insertions(+), 31 deletions(-) diff --git a/receiver/oracledbreceiver/config.go b/receiver/oracledbreceiver/config.go index 02c0adf7f40f4..389d638766361 100644 --- a/receiver/oracledbreceiver/config.go +++ b/receiver/oracledbreceiver/config.go @@ -33,7 +33,6 @@ type TopQueryCollection struct { MaxQuerySampleCount uint `mapstructure:"max_query_sample_count"` TopQueryCount uint `mapstructure:"top_query_count"` CollectionInterval time.Duration `mapstructure:"collection_interval"` - LookbackTime time.Duration `mapstructure:"lookback_time"` } type QuerySample struct { @@ -109,10 +108,3 @@ func (c Config) Validate() error { } return allErrs } - -func (cfg *Config) EffectiveLookbackTime() time.Duration { - if cfg.LookbackTime <= 0 { - return 2 * cfg.ControllerConfig.CollectionInterval - } - return cfg.LookbackTime -} diff --git a/receiver/oracledbreceiver/factory.go b/receiver/oracledbreceiver/factory.go index 6302b10345248..8e753f2137221 100644 --- a/receiver/oracledbreceiver/factory.go +++ b/receiver/oracledbreceiver/factory.go @@ -50,6 +50,7 @@ func createDefaultConfig() component.Config { TopQueryCollection: TopQueryCollection{ MaxQuerySampleCount: 1000, TopQueryCount: 200, + CollectionInterval: 10 * time.Second, }, } } diff --git a/receiver/oracledbreceiver/scraper.go b/receiver/oracledbreceiver/scraper.go index bc7f4dd5b24b9..f82e4fb8a291e 100644 
--- a/receiver/oracledbreceiver/scraper.go +++ b/receiver/oracledbreceiver/scraper.go @@ -11,6 +11,7 @@ import ( "encoding/json" "errors" "fmt" + "math" "net" "os" "sort" @@ -134,20 +135,20 @@ type oracleScraper struct { querySampleCfg QuerySample serviceInstanceID string lastExecutionTimestamp time.Time + lastQueryMetricsDBTime string } func newScraper(metricsBuilder *metadata.MetricsBuilder, metricsBuilderConfig metadata.MetricsBuilderConfig, scrapeCfg scraperhelper.ControllerConfig, logger *zap.Logger, providerFunc dbProviderFunc, clientProviderFunc clientProviderFunc, instanceName, hostName string) (scraper.Metrics, error) { s := &oracleScraper{ - mb: metricsBuilder, - metricsBuilderConfig: metricsBuilderConfig, - scrapeCfg: scrapeCfg, - logger: logger, - dbProviderFunc: providerFunc, - clientProviderFunc: clientProviderFunc, - instanceName: instanceName, - hostName: hostName, - serviceInstanceID: getInstanceID(instanceName, logger), - lastExecutionTimestamp: time.Unix(0, 0), + mb: metricsBuilder, + metricsBuilderConfig: metricsBuilderConfig, + scrapeCfg: scrapeCfg, + logger: logger, + dbProviderFunc: providerFunc, + clientProviderFunc: clientProviderFunc, + instanceName: instanceName, + hostName: hostName, + serviceInstanceID: getInstanceID(instanceName, logger), } return scraper.NewMetrics(s.scrape, scraper.WithShutdown(s.shutdown), scraper.WithStart(s.start)) } @@ -531,8 +532,12 @@ func (s *oracleScraper) scrapeLogs(ctx context.Context) (plog.Logs, error) { var scrapeErrors []error if s.logsBuilderConfig.Events.DbServerTopQuery.Enabled { - if s.isTopNMetricsCollectionDue() { - topNCollectionErrors := s.collectTopNMetricData(ctx, logs) + currentCollectionTime := time.Now() + + if int(math.Ceil(currentCollectionTime.Sub(s.lastExecutionTimestamp).Seconds())) < int(s.topQueryCollectCfg.CollectionInterval.Seconds()) { + s.logger.Debug("Skipping the collection of top queries because the current time has not yet exceeded the last execution time plus the specified 
collection interval") + } else { + topNCollectionErrors := s.collectTopNMetricData(ctx, logs, currentCollectionTime) if topNCollectionErrors != nil { scrapeErrors = append(scrapeErrors, topNCollectionErrors) } @@ -549,17 +554,13 @@ func (s *oracleScraper) scrapeLogs(ctx context.Context) (plog.Logs, error) { return logs, errors.Join(scrapeErrors...) } -func (s *oracleScraper) isTopNMetricsCollectionDue() bool { - -} - -func (s *oracleScraper) collectTopNMetricData(ctx context.Context, logs plog.Logs) error { +func (s *oracleScraper) collectTopNMetricData(ctx context.Context, logs plog.Logs, collectionTime time.Time) error { var errs []error // get metrics and query texts from DB - timestamp := pcommon.NewTimestampFromTime(time.Now()) - intervalSeconds := int(s.scrapeCfg.CollectionInterval.Seconds()) + lookbackTimeSeconds := s.calculateLookbackSeconds() + s.oracleQueryMetricsClient = s.clientProviderFunc(s.db, oracleQueryMetricsSQL, s.logger) - metricRows, metricError := s.oracleQueryMetricsClient.metricRows(ctx, intervalSeconds, s.topQueryCollectCfg.MaxQuerySampleCount) + metricRows, metricError := s.oracleQueryMetricsClient.metricRows(ctx, lookbackTimeSeconds, lookbackTimeSeconds, s.topQueryCollectCfg.MaxQuerySampleCount) if metricError != nil { return fmt.Errorf("error executing oracleQueryMetricsSQL: %w", metricError) @@ -625,6 +626,8 @@ func (s *oracleScraper) collectTopNMetricData(ctx context.Context, logs plog.Log // if cache updates is not equal to rows returned, that indicates there is problem somewhere s.logger.Debug("Cache update", zap.Int("update-count", cacheUpdates), zap.Int("new-size", s.metricCache.Len())) + s.lastExecutionTimestamp = collectionTime + fmt.Println(">>>>>> time set") if len(hits) == 0 { s.logger.Info("No log records for this scrape") return errors.Join(errs...) 
@@ -654,7 +657,7 @@ func (s *oracleScraper) collectTopNMetricData(ctx context.Context, logs plog.Log planString := string(planBytes) s.lb.RecordDbServerTopQueryEvent(context.Background(), - timestamp, + pcommon.NewTimestampFromTime(collectionTime), dbSystemNameVal, s.hostName, hit.queryText, @@ -880,3 +883,15 @@ func constructInstanceID(host, port, service string) string { } return fmt.Sprintf("%s:%s", host, port) } + +func (s *oracleScraper) calculateLookbackSeconds() int { + if s.lastExecutionTimestamp.IsZero() { + return int(s.topQueryCollectCfg.CollectionInterval.Seconds()) + } + + const vsqlRefreshLagSec = 10 * time.Second // Buffer to account for v$sql maximum refresh latency + + return int(math.Ceil(time.Now(). + Add(vsqlRefreshLagSec). + Sub(s.lastExecutionTimestamp).Seconds())) +} diff --git a/receiver/oracledbreceiver/templates/oracleQueryMetricsAndTextSql.tmpl b/receiver/oracledbreceiver/templates/oracleQueryMetricsAndTextSql.tmpl index 5dc0a2f4f0e56..586486d162155 100644 --- a/receiver/oracledbreceiver/templates/oracleQueryMetricsAndTextSql.tmpl +++ b/receiver/oracledbreceiver/templates/oracleQueryMetricsAndTextSql.tmpl @@ -1,4 +1,6 @@ /* otel-collector */ SELECT +SYSDATE, +SYSDATE - NUMTODSINTERVAL(:1, 'SECOND') AS START_POINT, SQL_ID, SQL_FULLTEXT, CHILD_NUMBER, @@ -20,5 +22,5 @@ DISK_READS, DIRECT_WRITES, DIRECT_READS FROM V$SQL -WHERE LAST_ACTIVE_TIME >= SYSDATE - NUMTODSINTERVAL(:1, 'SECOND') -FETCH FIRST :2 ROWS ONLY \ No newline at end of file +WHERE LAST_ACTIVE_TIME >= SYSDATE - NUMTODSINTERVAL(:2, 'SECOND') +FETCH FIRST :3 ROWS ONLY \ No newline at end of file From 476859547cfd13bde23d11a5052f11a718ddca48 Mon Sep 17 00:00:00 2001 From: sreenathv Date: Wed, 12 Nov 2025 12:02:04 +0000 Subject: [PATCH 03/41] cleanup --- receiver/oracledbreceiver/factory.go | 2 +- receiver/oracledbreceiver/scraper.go | 3 +-- .../templates/oracleQueryMetricsAndTextSql.tmpl | 5 ++--- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git 
a/receiver/oracledbreceiver/factory.go b/receiver/oracledbreceiver/factory.go index 8e753f2137221..4437e2273e0da 100644 --- a/receiver/oracledbreceiver/factory.go +++ b/receiver/oracledbreceiver/factory.go @@ -50,7 +50,7 @@ func createDefaultConfig() component.Config { TopQueryCollection: TopQueryCollection{ MaxQuerySampleCount: 1000, TopQueryCount: 200, - CollectionInterval: 10 * time.Second, + CollectionInterval: time.Minute, }, } } diff --git a/receiver/oracledbreceiver/scraper.go b/receiver/oracledbreceiver/scraper.go index f82e4fb8a291e..07d8714e19a28 100644 --- a/receiver/oracledbreceiver/scraper.go +++ b/receiver/oracledbreceiver/scraper.go @@ -560,7 +560,7 @@ func (s *oracleScraper) collectTopNMetricData(ctx context.Context, logs plog.Log lookbackTimeSeconds := s.calculateLookbackSeconds() s.oracleQueryMetricsClient = s.clientProviderFunc(s.db, oracleQueryMetricsSQL, s.logger) - metricRows, metricError := s.oracleQueryMetricsClient.metricRows(ctx, lookbackTimeSeconds, lookbackTimeSeconds, s.topQueryCollectCfg.MaxQuerySampleCount) + metricRows, metricError := s.oracleQueryMetricsClient.metricRows(ctx, lookbackTimeSeconds, s.topQueryCollectCfg.MaxQuerySampleCount) if metricError != nil { return fmt.Errorf("error executing oracleQueryMetricsSQL: %w", metricError) @@ -627,7 +627,6 @@ func (s *oracleScraper) collectTopNMetricData(ctx context.Context, logs plog.Log s.logger.Debug("Cache update", zap.Int("update-count", cacheUpdates), zap.Int("new-size", s.metricCache.Len())) s.lastExecutionTimestamp = collectionTime - fmt.Println(">>>>>> time set") if len(hits) == 0 { s.logger.Info("No log records for this scrape") return errors.Join(errs...) 
diff --git a/receiver/oracledbreceiver/templates/oracleQueryMetricsAndTextSql.tmpl b/receiver/oracledbreceiver/templates/oracleQueryMetricsAndTextSql.tmpl index 586486d162155..77d0ca0fdc647 100644 --- a/receiver/oracledbreceiver/templates/oracleQueryMetricsAndTextSql.tmpl +++ b/receiver/oracledbreceiver/templates/oracleQueryMetricsAndTextSql.tmpl @@ -1,6 +1,5 @@ /* otel-collector */ SELECT SYSDATE, -SYSDATE - NUMTODSINTERVAL(:1, 'SECOND') AS START_POINT, SQL_ID, SQL_FULLTEXT, CHILD_NUMBER, @@ -22,5 +21,5 @@ DISK_READS, DIRECT_WRITES, DIRECT_READS FROM V$SQL -WHERE LAST_ACTIVE_TIME >= SYSDATE - NUMTODSINTERVAL(:2, 'SECOND') -FETCH FIRST :3 ROWS ONLY \ No newline at end of file +WHERE LAST_ACTIVE_TIME >= SYSDATE - NUMTODSINTERVAL(:1, 'SECOND') +FETCH FIRST :2 ROWS ONLY \ No newline at end of file From 065e43701aaec4a519cb60131f08f32160b5f95d Mon Sep 17 00:00:00 2001 From: sreenathv Date: Fri, 14 Nov 2025 11:15:26 +0000 Subject: [PATCH 04/41] cleanup --- receiver/oracledbreceiver/config_test.go | 2 +- receiver/oracledbreceiver/scraper.go | 3 +-- .../templates/oracleQueryMetricsAndTextSql.tmpl | 1 - 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/receiver/oracledbreceiver/config_test.go b/receiver/oracledbreceiver/config_test.go index 6573094d67603..215c02f9845d2 100644 --- a/receiver/oracledbreceiver/config_test.go +++ b/receiver/oracledbreceiver/config_test.go @@ -117,7 +117,7 @@ func TestValidateInvalidConfigs(t *testing.T) { func TestCreateDefaultConfig(t *testing.T) { cfg := createDefaultConfig().(*Config) - assert.Equal(t, 10*time.Second, cfg.CollectionInterval) + assert.Equal(t, 10*time.Second, cfg.ControllerConfig.CollectionInterval) } func TestParseConfig(t *testing.T) { diff --git a/receiver/oracledbreceiver/scraper.go b/receiver/oracledbreceiver/scraper.go index 07d8714e19a28..8b4083234ac88 100644 --- a/receiver/oracledbreceiver/scraper.go +++ b/receiver/oracledbreceiver/scraper.go @@ -533,7 +533,6 @@ func (s *oracleScraper) scrapeLogs(ctx 
context.Context) (plog.Logs, error) { if s.logsBuilderConfig.Events.DbServerTopQuery.Enabled { currentCollectionTime := time.Now() - if int(math.Ceil(currentCollectionTime.Sub(s.lastExecutionTimestamp).Seconds())) < int(s.topQueryCollectCfg.CollectionInterval.Seconds()) { s.logger.Debug("Skipping the collection of top queries because the current time has not yet exceeded the last execution time plus the specified collection interval") } else { @@ -626,7 +625,6 @@ func (s *oracleScraper) collectTopNMetricData(ctx context.Context, logs plog.Log // if cache updates is not equal to rows returned, that indicates there is problem somewhere s.logger.Debug("Cache update", zap.Int("update-count", cacheUpdates), zap.Int("new-size", s.metricCache.Len())) - s.lastExecutionTimestamp = collectionTime if len(hits) == 0 { s.logger.Info("No log records for this scrape") return errors.Join(errs...) @@ -685,6 +683,7 @@ func (s *oracleScraper) collectTopNMetricData(ctx context.Context, logs plog.Log s.logger.Debug("Log records for this scrape", zap.Int("count", hitCount)) } + s.lastExecutionTimestamp = collectionTime s.lb.Emit(metadata.WithLogsResource(rb.Emit())).ResourceLogs().MoveAndAppendTo(logs.ResourceLogs()) return errors.Join(errs...) 
diff --git a/receiver/oracledbreceiver/templates/oracleQueryMetricsAndTextSql.tmpl b/receiver/oracledbreceiver/templates/oracleQueryMetricsAndTextSql.tmpl index 77d0ca0fdc647..5dc0a2f4f0e56 100644 --- a/receiver/oracledbreceiver/templates/oracleQueryMetricsAndTextSql.tmpl +++ b/receiver/oracledbreceiver/templates/oracleQueryMetricsAndTextSql.tmpl @@ -1,5 +1,4 @@ /* otel-collector */ SELECT -SYSDATE, SQL_ID, SQL_FULLTEXT, CHILD_NUMBER, From 13592ecb3978d1a6d06e2e24fb51e766ace00e22 Mon Sep 17 00:00:00 2001 From: sreenathv Date: Fri, 14 Nov 2025 11:42:37 +0000 Subject: [PATCH 05/41] logging correction --- receiver/oracledbreceiver/scraper.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/receiver/oracledbreceiver/scraper.go b/receiver/oracledbreceiver/scraper.go index 8b4083234ac88..682e8ca08756c 100644 --- a/receiver/oracledbreceiver/scraper.go +++ b/receiver/oracledbreceiver/scraper.go @@ -534,7 +534,7 @@ func (s *oracleScraper) scrapeLogs(ctx context.Context) (plog.Logs, error) { if s.logsBuilderConfig.Events.DbServerTopQuery.Enabled { currentCollectionTime := time.Now() if int(math.Ceil(currentCollectionTime.Sub(s.lastExecutionTimestamp).Seconds())) < int(s.topQueryCollectCfg.CollectionInterval.Seconds()) { - s.logger.Debug("Skipping the collection of top queries because the current time has not yet exceeded the last execution time plus the specified collection interval") + s.logger.Debug("Skipping the collection of top queries because collection interval not yet elapsed.") } else { topNCollectionErrors := s.collectTopNMetricData(ctx, logs, currentCollectionTime) if topNCollectionErrors != nil { From 69b839b0dc995e681a924cb2b7cb0e133ab4c0c6 Mon Sep 17 00:00:00 2001 From: sreenathv Date: Fri, 14 Nov 2025 11:57:16 +0000 Subject: [PATCH 06/41] tests todo --- receiver/oracledbreceiver/scraper_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/receiver/oracledbreceiver/scraper_test.go b/receiver/oracledbreceiver/scraper_test.go index 
4300a1fc512f0..74545de044a04 100644 --- a/receiver/oracledbreceiver/scraper_test.go +++ b/receiver/oracledbreceiver/scraper_test.go @@ -426,6 +426,10 @@ func TestGetInstanceId(t *testing.T) { assert.Equal(t, "unknown:1521", localInstanceID) } +func testCalculateLookbackSeconds(t *testing.T) { + //TODO +} + func readFile(fname string) []byte { file, err := os.ReadFile(filepath.Join("testdata", fname)) if err != nil { From e61577d3ec70a4e3d6dba05f57edebe57865968b Mon Sep 17 00:00:00 2001 From: sreenathv Date: Fri, 14 Nov 2025 14:34:55 +0000 Subject: [PATCH 07/41] updating unit tests --- receiver/oracledbreceiver/scraper_test.go | 100 +++++++++++++++++++++- 1 file changed, 98 insertions(+), 2 deletions(-) diff --git a/receiver/oracledbreceiver/scraper_test.go b/receiver/oracledbreceiver/scraper_test.go index 74545de044a04..3ee20fca33a22 100644 --- a/receiver/oracledbreceiver/scraper_test.go +++ b/receiver/oracledbreceiver/scraper_test.go @@ -12,6 +12,7 @@ import ( "path/filepath" "strings" "testing" + "time" lru "github.com/hashicorp/golang-lru/v2" "github.com/stretchr/testify/assert" @@ -291,6 +292,7 @@ func TestScraper_ScrapeTopNLogs(t *testing.T) { }() require.NoError(t, err) expectedQueryPlanFile := filepath.Join("testdata", "expectedQueryTextAndPlanQuery.yaml") + collectionTriggerTime := time.Now() logs, err := scrpr.scrapeLogs(t.Context()) @@ -304,6 +306,8 @@ func TestScraper_ScrapeTopNLogs(t *testing.T) { assert.NoError(t, errs) assert.Equal(t, "db.server.top_query", logs.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).EventName()) assert.NoError(t, errs) + + assert.True(t, scrpr.lastExecutionTimestamp.After(collectionTriggerTime), "lastExecutionTimestamp hasn't set after a successful collection") } }) } @@ -426,8 +430,100 @@ func TestGetInstanceId(t *testing.T) { assert.Equal(t, "unknown:1521", localInstanceID) } -func testCalculateLookbackSeconds(t *testing.T) { - //TODO +func TestScrapesTopNLogsOnlyWhenIntervalHasElapsed(t *testing.T) { + var 
metricRowData []metricRow + var logRowData []metricRow + tests := []struct { + name string + dbclientFn func(db *sql.DB, s string, logger *zap.Logger) dbClient + errWanted string + }{ + { + name: "valid collection", + dbclientFn: func(_ *sql.DB, s string, _ *zap.Logger) dbClient { + if strings.Contains(s, "V$SQL_PLAN") { + metricRowFile := readFile("oracleQueryPlanData.txt") + unmarshalErr := json.Unmarshal(metricRowFile, &logRowData) + if unmarshalErr == nil { + return &fakeDbClient{ + Responses: [][]metricRow{ + logRowData, + }, + } + } + } else { + metricRowFile := readFile("oracleQueryMetricsData.txt") + unmarshalErr := json.Unmarshal(metricRowFile, &metricRowData) + if unmarshalErr == nil { + return &fakeDbClient{ + Responses: [][]metricRow{ + metricRowData, + }, + } + } + } + return nil + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + logsCfg := metadata.DefaultLogsBuilderConfig() + logsCfg.Events.DbServerTopQuery.Enabled = true + metricsCfg := metadata.DefaultMetricsBuilderConfig() + lruCache, _ := lru.New[string, map[string]int64](500) + lruCache.Add("fxk8aq3nds8aw:0", cacheValue) + + scrpr := oracleScraper{ + logger: zap.NewNop(), + mb: metadata.NewMetricsBuilder(metricsCfg, receivertest.NewNopSettings(metadata.Type)), + lb: metadata.NewLogsBuilder(logsCfg, receivertest.NewNopSettings(metadata.Type)), + dbProviderFunc: func() (*sql.DB, error) { + return nil, nil + }, + clientProviderFunc: test.dbclientFn, + metricsBuilderConfig: metadata.DefaultMetricsBuilderConfig(), + logsBuilderConfig: metadata.DefaultLogsBuilderConfig(), + metricCache: lruCache, + topQueryCollectCfg: TopQueryCollection{MaxQuerySampleCount: 5000, TopQueryCount: 200}, + obfuscator: newObfuscator(), + } + + scrpr.logsBuilderConfig.Events.DbServerTopQuery.Enabled = true + scrpr.topQueryCollectCfg.CollectionInterval = 1 * time.Minute + + err := scrpr.start(t.Context(), componenttest.NewNopHost()) + defer func() { + assert.NoError(t, 
scrpr.shutdown(t.Context())) + }() + require.NoError(t, err) + + assert.True(t, scrpr.lastExecutionTimestamp.IsZero(), "No value should be set for lastExecutionTimestamp before a successful collection") + logsCol1, _ := scrpr.scrapeLogs(t.Context()) + assert.Equal(t, 1, logsCol1.ResourceLogs().At(0).ScopeLogs().Len(), "Collection should run when lastExecutionTimestamp is not available") + assert.False(t, scrpr.lastExecutionTimestamp.IsZero(), "A value should be set for lastExecutionTimestamp after a successful collection") + + scrpr.lastExecutionTimestamp.Add(-10 * time.Second) + logsCol2, err := scrpr.scrapeLogs(t.Context()) + assert.Equal(t, 0, logsCol2.ResourceLogs().Len(), "top_query should not be collected until %s elapsed.", scrpr.topQueryCollectCfg.CollectionInterval.String()) + require.NoError(t, err, "coll") + }) + } +} + +func TestCalculateLookbackSeconds(t *testing.T) { + collectionInterval := 20 * time.Second + vsqlRefreshLagSec := 10 * time.Second + expectedMinimumLookbackTime := int((collectionInterval + vsqlRefreshLagSec).Seconds()) + currentCollectionTime := time.Now() + + scrpr := oracleScraper{ + lastExecutionTimestamp: currentCollectionTime.Add(-collectionInterval), + } + lookbackTime := scrpr.calculateLookbackSeconds() + + assert.LessOrEqual(t, expectedMinimumLookbackTime, lookbackTime, "`lookbackTime` should be minimum %d", expectedMinimumLookbackTime) } func readFile(fname string) []byte { From 693effcf3b20116a18d3f254b38ca51faa6ae61a Mon Sep 17 00:00:00 2001 From: sreenathv Date: Fri, 14 Nov 2025 14:54:24 +0000 Subject: [PATCH 08/41] cleanup --- receiver/oracledbreceiver/scraper.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/receiver/oracledbreceiver/scraper.go b/receiver/oracledbreceiver/scraper.go index 682e8ca08756c..a9f4b86bbebd9 100644 --- a/receiver/oracledbreceiver/scraper.go +++ b/receiver/oracledbreceiver/scraper.go @@ -533,10 +533,11 @@ func (s *oracleScraper) scrapeLogs(ctx context.Context) 
(plog.Logs, error) { if s.logsBuilderConfig.Events.DbServerTopQuery.Enabled { currentCollectionTime := time.Now() - if int(math.Ceil(currentCollectionTime.Sub(s.lastExecutionTimestamp).Seconds())) < int(s.topQueryCollectCfg.CollectionInterval.Seconds()) { + lookbackTimeCounter := s.calculateLookbackSeconds() + if lookbackTimeCounter < int(s.topQueryCollectCfg.CollectionInterval.Seconds()) { s.logger.Debug("Skipping the collection of top queries because collection interval not yet elapsed.") } else { - topNCollectionErrors := s.collectTopNMetricData(ctx, logs, currentCollectionTime) + topNCollectionErrors := s.collectTopNMetricData(ctx, logs, currentCollectionTime, lookbackTimeCounter) if topNCollectionErrors != nil { scrapeErrors = append(scrapeErrors, topNCollectionErrors) } @@ -553,11 +554,9 @@ func (s *oracleScraper) scrapeLogs(ctx context.Context) (plog.Logs, error) { return logs, errors.Join(scrapeErrors...) } -func (s *oracleScraper) collectTopNMetricData(ctx context.Context, logs plog.Logs, collectionTime time.Time) error { +func (s *oracleScraper) collectTopNMetricData(ctx context.Context, logs plog.Logs, collectionTime time.Time, lookbackTimeSeconds int) error { var errs []error // get metrics and query texts from DB - lookbackTimeSeconds := s.calculateLookbackSeconds() - s.oracleQueryMetricsClient = s.clientProviderFunc(s.db, oracleQueryMetricsSQL, s.logger) metricRows, metricError := s.oracleQueryMetricsClient.metricRows(ctx, lookbackTimeSeconds, s.topQueryCollectCfg.MaxQuerySampleCount) From ab27addf282c23d0269348a866d395b07c22071a Mon Sep 17 00:00:00 2001 From: sreenathv Date: Fri, 14 Nov 2025 15:05:28 +0000 Subject: [PATCH 09/41] cleanup --- receiver/oracledbreceiver/scraper.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/receiver/oracledbreceiver/scraper.go b/receiver/oracledbreceiver/scraper.go index a9f4b86bbebd9..bf08362845fb2 100644 --- a/receiver/oracledbreceiver/scraper.go +++ b/receiver/oracledbreceiver/scraper.go @@ 
-135,7 +135,6 @@ type oracleScraper struct { querySampleCfg QuerySample serviceInstanceID string lastExecutionTimestamp time.Time - lastQueryMetricsDBTime string } func newScraper(metricsBuilder *metadata.MetricsBuilder, metricsBuilderConfig metadata.MetricsBuilderConfig, scrapeCfg scraperhelper.ControllerConfig, logger *zap.Logger, providerFunc dbProviderFunc, clientProviderFunc clientProviderFunc, instanceName, hostName string) (scraper.Metrics, error) { @@ -535,7 +534,7 @@ func (s *oracleScraper) scrapeLogs(ctx context.Context) (plog.Logs, error) { currentCollectionTime := time.Now() lookbackTimeCounter := s.calculateLookbackSeconds() if lookbackTimeCounter < int(s.topQueryCollectCfg.CollectionInterval.Seconds()) { - s.logger.Debug("Skipping the collection of top queries because collection interval not yet elapsed.") + s.logger.Debug("Skipping the collection of top queries because collection interval has not yet elapsed.") } else { topNCollectionErrors := s.collectTopNMetricData(ctx, logs, currentCollectionTime, lookbackTimeCounter) if topNCollectionErrors != nil { From 5d1fdc78726af6df3ce62538696cf979d9d51434 Mon Sep 17 00:00:00 2001 From: sreenathv Date: Mon, 17 Nov 2025 13:13:42 +0000 Subject: [PATCH 10/41] fix tests --- receiver/oracledbreceiver/scraper_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/receiver/oracledbreceiver/scraper_test.go b/receiver/oracledbreceiver/scraper_test.go index 3ee20fca33a22..11b1f3dc99fb0 100644 --- a/receiver/oracledbreceiver/scraper_test.go +++ b/receiver/oracledbreceiver/scraper_test.go @@ -504,10 +504,10 @@ func TestScrapesTopNLogsOnlyWhenIntervalHasElapsed(t *testing.T) { assert.Equal(t, 1, logsCol1.ResourceLogs().At(0).ScopeLogs().Len(), "Collection should run when lastExecutionTimestamp is not available") assert.False(t, scrpr.lastExecutionTimestamp.IsZero(), "A value should be set for lastExecutionTimestamp after a successful collection") - scrpr.lastExecutionTimestamp.Add(-10 * 
time.Second) + scrpr.lastExecutionTimestamp = scrpr.lastExecutionTimestamp.Add(-10 * time.Second) logsCol2, err := scrpr.scrapeLogs(t.Context()) assert.Equal(t, 0, logsCol2.ResourceLogs().Len(), "top_query should not be collected until %s elapsed.", scrpr.topQueryCollectCfg.CollectionInterval.String()) - require.NoError(t, err, "coll") + require.NoError(t, err) }) } } From 09d800eb9f8b86dca5a487d904a05a79668e7bb7 Mon Sep 17 00:00:00 2001 From: sreenathv Date: Fri, 21 Nov 2025 10:41:53 +0000 Subject: [PATCH 11/41] addressing review comments --- receiver/oracledbreceiver/scraper.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/receiver/oracledbreceiver/scraper.go b/receiver/oracledbreceiver/scraper.go index bf08362845fb2..071a04a5e6489 100644 --- a/receiver/oracledbreceiver/scraper.go +++ b/receiver/oracledbreceiver/scraper.go @@ -540,6 +540,7 @@ func (s *oracleScraper) scrapeLogs(ctx context.Context) (plog.Logs, error) { if topNCollectionErrors != nil { scrapeErrors = append(scrapeErrors, topNCollectionErrors) } + s.lastExecutionTimestamp = currentCollectionTime } } @@ -681,7 +682,6 @@ func (s *oracleScraper) collectTopNMetricData(ctx context.Context, logs plog.Log s.logger.Debug("Log records for this scrape", zap.Int("count", hitCount)) } - s.lastExecutionTimestamp = collectionTime s.lb.Emit(metadata.WithLogsResource(rb.Emit())).ResourceLogs().MoveAndAppendTo(logs.ResourceLogs()) return errors.Join(errs...) @@ -885,7 +885,9 @@ func (s *oracleScraper) calculateLookbackSeconds() int { return int(s.topQueryCollectCfg.CollectionInterval.Seconds()) } - const vsqlRefreshLagSec = 10 * time.Second // Buffer to account for v$sql maximum refresh latency + // vsqlRefreshLagSec is the buffer to account for v$sql maximum refresh latency (5 seconds) + 5 seconds to offset any collection delays. 
+ // PS: https://docs.oracle.com/en/database/oracle/oracle-database/21/refrn/V-SQL.html + const vsqlRefreshLagSec = 10 * time.Second return int(math.Ceil(time.Now(). Add(vsqlRefreshLagSec). From 1bd33d87fd4ac8c2ddae6519de8b90f8f0cffc94 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 26 Nov 2025 09:48:51 -0800 Subject: [PATCH 12/41] fix(deps): update module github.com/redis/go-redis/v9 to v9.17.1 (#44527) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Change | Age | Confidence | |---|---|---|---| | [github.com/redis/go-redis/v9](https://redirect.github.com/redis/go-redis) | `v9.16.0` -> `v9.17.1` | [![age](https://developer.mend.io/api/mc/badges/age/go/github.com%2fredis%2fgo-redis%2fv9/v9.17.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/github.com%2fredis%2fgo-redis%2fv9/v9.16.0/v9.17.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | --- > [!WARNING] > Some dependencies could not be looked up. Check the Dependency Dashboard for more information. --- ### Release Notes
redis/go-redis (github.com/redis/go-redis/v9) ### [`v9.17.1`](https://redirect.github.com/redis/go-redis/releases/tag/v9.17.1): 9.17.1 [Compare Source](https://redirect.github.com/redis/go-redis/compare/v9.17.0...v9.17.1) #### πŸ› Bug Fixes - add wait to keyless commands list ([#​3615](https://redirect.github.com/redis/go-redis/pull/3615)) by [@​marcoferrer](https://redirect.github.com/marcoferrer) - fix(time): remove cached time optimization ([#​3611](https://redirect.github.com/redis/go-redis/pull/3611)) by [@​ndyakov](https://redirect.github.com/ndyakov) #### 🧰 Maintenance - chore(deps): bump golangci/golangci-lint-action from 9.0.0 to 9.1.0 ([#​3609](https://redirect.github.com/redis/go-redis/pull/3609)) - chore(deps): bump actions/checkout from 5 to 6 ([#​3610](https://redirect.github.com/redis/go-redis/pull/3610)) - chore(script): fix help call in tag.sh ([#​3606](https://redirect.github.com/redis/go-redis/pull/3606)) by [@​ndyakov](https://redirect.github.com/ndyakov) #### Contributors We'd like to thank all the contributors who worked on this release! [@​marcoferrer](https://redirect.github.com/marcoferrer) and [@​ndyakov](https://redirect.github.com/ndyakov) ### [`v9.17.0`](https://redirect.github.com/redis/go-redis/releases/tag/v9.17.0): 9.17.0 [Compare Source](https://redirect.github.com/redis/go-redis/compare/v9.16.0...v9.17.0) #### πŸš€ Highlights ##### Redis 8.4 Support Added support for Redis 8.4, including new commands and features ([#​3572](https://redirect.github.com/redis/go-redis/pull/3572)) ##### Typed Errors Introduced typed errors for better error handling using `errors.As` instead of string checks. 
Errors can now be wrapped and set to commands in hooks without breaking library functionality ([#​3602](https://redirect.github.com/redis/go-redis/pull/3602)) ##### New Commands - **CAS/CAD Commands**: Added support for Compare-And-Set/Compare-And-Delete operations with conditional matching (`IFEQ`, `IFNE`, `IFDEQ`, `IFDNE`) ([#​3583](https://redirect.github.com/redis/go-redis/pull/3583), [#​3595](https://redirect.github.com/redis/go-redis/pull/3595)) - **MSETEX**: Atomically set multiple key-value pairs with expiration options and conditional modes ([#​3580](https://redirect.github.com/redis/go-redis/pull/3580)) - **XReadGroup CLAIM**: Consume both incoming and idle pending entries from streams in a single call ([#​3578](https://redirect.github.com/redis/go-redis/pull/3578)) - **ACL Commands**: Added `ACLGenPass`, `ACLUsers`, and `ACLWhoAmI` ([#​3576](https://redirect.github.com/redis/go-redis/pull/3576)) - **SLOWLOG Commands**: Added `SLOWLOG LEN` and `SLOWLOG RESET` ([#​3585](https://redirect.github.com/redis/go-redis/pull/3585)) - **LATENCY Commands**: Added `LATENCY LATEST` and `LATENCY RESET` ([#​3584](https://redirect.github.com/redis/go-redis/pull/3584)) ##### Search & Vector Improvements - **Hybrid Search**: Added **EXPERIMENTAL** support for the new `FT.HYBRID` command ([#​3573](https://redirect.github.com/redis/go-redis/pull/3573)) - **Vector Range**: Added `VRANGE` command for vector sets ([#​3543](https://redirect.github.com/redis/go-redis/pull/3543)) - **FT.INFO Enhancements**: Added vector-specific attributes in FT.INFO response ([#​3596](https://redirect.github.com/redis/go-redis/pull/3596)) ##### Connection Pool Improvements - **Improved Connection Success Rate**: Implemented FIFO queue-based fairness and context pattern for connection creation to prevent premature cancellation under high concurrency ([#​3518](https://redirect.github.com/redis/go-redis/pull/3518)) - **Connection State Machine**: Resolved race conditions and improved pool 
performance with proper state tracking ([#​3559](https://redirect.github.com/redis/go-redis/pull/3559)) - **Pool Performance**: Significant performance improvements with faster semaphores, lockless hook manager, and reduced allocations (47-67% faster Get/Put operations) ([#​3565](https://redirect.github.com/redis/go-redis/pull/3565)) ##### Metrics & Observability - **Canceled Metric Attribute**: Added 'canceled' metrics attribute to distinguish context cancellation errors from other errors ([#​3566](https://redirect.github.com/redis/go-redis/pull/3566)) #### ✨ New Features - Typed errors with wrapping support ([#​3602](https://redirect.github.com/redis/go-redis/pull/3602)) by [@​ndyakov](https://redirect.github.com/ndyakov) - CAS/CAD commands (marked as experimental) ([#​3583](https://redirect.github.com/redis/go-redis/pull/3583), [#​3595](https://redirect.github.com/redis/go-redis/pull/3595)) by [@​ndyakov](https://redirect.github.com/ndyakov), [@​htemelski-redis](https://redirect.github.com/htemelski-redis) - MSETEX command support ([#​3580](https://redirect.github.com/redis/go-redis/pull/3580)) by [@​ofekshenawa](https://redirect.github.com/ofekshenawa) - XReadGroup CLAIM argument ([#​3578](https://redirect.github.com/redis/go-redis/pull/3578)) by [@​ofekshenawa](https://redirect.github.com/ofekshenawa) - ACL commands: GenPass, Users, WhoAmI ([#​3576](https://redirect.github.com/redis/go-redis/pull/3576)) by [@​destinyoooo](https://redirect.github.com/destinyoooo) - SLOWLOG commands: LEN, RESET ([#​3585](https://redirect.github.com/redis/go-redis/pull/3585)) by [@​destinyoooo](https://redirect.github.com/destinyoooo) - LATENCY commands: LATEST, RESET ([#​3584](https://redirect.github.com/redis/go-redis/pull/3584)) by [@​destinyoooo](https://redirect.github.com/destinyoooo) - Hybrid search command (FT.HYBRID) ([#​3573](https://redirect.github.com/redis/go-redis/pull/3573)) by [@​htemelski-redis](https://redirect.github.com/htemelski-redis) - Vector range command 
(VRANGE) ([#​3543](https://redirect.github.com/redis/go-redis/pull/3543)) by [@​cxljs](https://redirect.github.com/cxljs) - Vector-specific attributes in FT.INFO ([#​3596](https://redirect.github.com/redis/go-redis/pull/3596)) by [@​ndyakov](https://redirect.github.com/ndyakov) - Improved connection pool success rate with FIFO queue ([#​3518](https://redirect.github.com/redis/go-redis/pull/3518)) by [@​cyningsun](https://redirect.github.com/cyningsun) - Canceled metrics attribute for context errors ([#​3566](https://redirect.github.com/redis/go-redis/pull/3566)) by [@​pvragov](https://redirect.github.com/pvragov) #### πŸ› Bug Fixes - Fixed Failover Client MaintNotificationsConfig ([#​3600](https://redirect.github.com/redis/go-redis/pull/3600)) by [@​ajax16384](https://redirect.github.com/ajax16384) - Fixed ACLGenPass function to use the bit parameter ([#​3597](https://redirect.github.com/redis/go-redis/pull/3597)) by [@​destinyoooo](https://redirect.github.com/destinyoooo) - Return error instead of panic from commands ([#​3568](https://redirect.github.com/redis/go-redis/pull/3568)) by [@​dragneelfps](https://redirect.github.com/dragneelfps) - Safety harness in `joinErrors` to prevent panic ([#​3577](https://redirect.github.com/redis/go-redis/pull/3577)) by [@​manisharma](https://redirect.github.com/manisharma) #### ⚑ Performance - Connection state machine with race condition fixes ([#​3559](https://redirect.github.com/redis/go-redis/pull/3559)) by [@​ndyakov](https://redirect.github.com/ndyakov) - Pool performance improvements: 47-67% faster Get/Put, 33% less memory, 50% fewer allocations ([#​3565](https://redirect.github.com/redis/go-redis/pull/3565)) by [@​ndyakov](https://redirect.github.com/ndyakov) #### πŸ§ͺ Testing & Infrastructure - Updated to Redis 8.4.0 image ([#​3603](https://redirect.github.com/redis/go-redis/pull/3603)) by [@​ndyakov](https://redirect.github.com/ndyakov) - Added Redis 8.4-RC1-pre to CI 
([#​3572](https://redirect.github.com/redis/go-redis/pull/3572)) by [@​ndyakov](https://redirect.github.com/ndyakov) - Refactored tests for idiomatic Go ([#​3561](https://redirect.github.com/redis/go-redis/pull/3561), [#​3562](https://redirect.github.com/redis/go-redis/pull/3562), [#​3563](https://redirect.github.com/redis/go-redis/pull/3563)) by [@​12ya](https://redirect.github.com/12ya) #### πŸ‘₯ Contributors We'd like to thank all the contributors who worked on this release! [@​12ya](https://redirect.github.com/12ya), [@​ajax16384](https://redirect.github.com/ajax16384), [@​cxljs](https://redirect.github.com/cxljs), [@​cyningsun](https://redirect.github.com/cyningsun), [@​destinyoooo](https://redirect.github.com/destinyoooo), [@​dragneelfps](https://redirect.github.com/dragneelfps), [@​htemelski-redis](https://redirect.github.com/htemelski-redis), [@​manisharma](https://redirect.github.com/manisharma), [@​ndyakov](https://redirect.github.com/ndyakov), [@​ofekshenawa](https://redirect.github.com/ofekshenawa), [@​pvragov](https://redirect.github.com/pvragov) *** **Full Changelog**:
--- ### Configuration πŸ“… **Schedule**: Branch creation - "on tuesday" (UTC), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. β™» **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. πŸ”• **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/open-telemetry/opentelemetry-collector-contrib). --------- Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: otelbot <197425009+otelbot@users.noreply.github.com> --- extension/storage/redisstorageextension/go.mod | 2 +- extension/storage/redisstorageextension/go.sum | 4 ++-- receiver/redisreceiver/go.mod | 2 +- receiver/redisreceiver/go.sum | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/extension/storage/redisstorageextension/go.mod b/extension/storage/redisstorageextension/go.mod index 1c90a7cc81f5d..29a79d925a761 100644 --- a/extension/storage/redisstorageextension/go.mod +++ b/extension/storage/redisstorageextension/go.mod @@ -4,7 +4,7 @@ go 1.24.0 require ( github.com/go-redis/redismock/v9 v9.2.0 - github.com/redis/go-redis/v9 v9.16.0 + github.com/redis/go-redis/v9 v9.17.1 github.com/stretchr/testify v1.11.1 go.opentelemetry.io/collector/component v1.46.1-0.20251120204106-2e9c82787618 go.opentelemetry.io/collector/component/componenttest v0.140.1-0.20251120204106-2e9c82787618 diff --git a/extension/storage/redisstorageextension/go.sum b/extension/storage/redisstorageextension/go.sum index abcc858318945..c6f5690e33f01 100644 --- a/extension/storage/redisstorageextension/go.sum +++ b/extension/storage/redisstorageextension/go.sum @@ -67,8 +67,8 @@ github.com/onsi/gomega v1.25.0 
h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y= github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/redis/go-redis/v9 v9.16.0 h1:OotgqgLSRCmzfqChbQyG1PHC3tLNR89DG4jdOERSEP4= -github.com/redis/go-redis/v9 v9.16.0/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0//kSOd3370= +github.com/redis/go-redis/v9 v9.17.1 h1:7tl732FjYPRT9H9aNfyTwKg9iTETjWjGKEJ2t/5iWTs= +github.com/redis/go-redis/v9 v9.17.1/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0//kSOd3370= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= diff --git a/receiver/redisreceiver/go.mod b/receiver/redisreceiver/go.mod index c3775d1c9b875..c9bca8de557ab 100644 --- a/receiver/redisreceiver/go.mod +++ b/receiver/redisreceiver/go.mod @@ -6,7 +6,7 @@ require ( github.com/google/go-cmp v0.7.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.140.1 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.140.1 - github.com/redis/go-redis/v9 v9.16.0 + github.com/redis/go-redis/v9 v9.17.1 github.com/stretchr/testify v1.11.1 github.com/testcontainers/testcontainers-go v0.40.0 go.opentelemetry.io/collector/component v1.46.1-0.20251120204106-2e9c82787618 diff --git a/receiver/redisreceiver/go.sum b/receiver/redisreceiver/go.sum index 485a108ac023e..0a1db69efc591 100644 --- a/receiver/redisreceiver/go.sum +++ b/receiver/redisreceiver/go.sum @@ -132,8 +132,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/redis/go-redis/v9 v9.16.0 h1:OotgqgLSRCmzfqChbQyG1PHC3tLNR89DG4jdOERSEP4= -github.com/redis/go-redis/v9 v9.16.0/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0//kSOd3370= +github.com/redis/go-redis/v9 v9.17.1 h1:7tl732FjYPRT9H9aNfyTwKg9iTETjWjGKEJ2t/5iWTs= +github.com/redis/go-redis/v9 v9.17.1/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0//kSOd3370= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/shirou/gopsutil/v4 v4.25.6 h1:kLysI2JsKorfaFPcYmcJqbzROzsBWEOAtw6A7dIfqXs= From c09398291fcfa59a29355d27ffc1c9c43aefb01b Mon Sep 17 00:00:00 2001 From: Alex Boten <223565+codeboten@users.noreply.github.com> Date: Wed, 26 Nov 2025 09:49:35 -0800 Subject: [PATCH 13/41] [chore] add seeking new contributor to resource detection processor (#44558) As per 26-Nov-2025 SIG call Signed-off-by: alex boten <223565+codeboten@users.noreply.github.com> --- processor/resourcedetectionprocessor/README.md | 2 +- processor/resourcedetectionprocessor/metadata.yaml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/processor/resourcedetectionprocessor/README.md b/processor/resourcedetectionprocessor/README.md index 05bc69fc9d3f1..2849946d4a90c 100644 --- a/processor/resourcedetectionprocessor/README.md +++ b/processor/resourcedetectionprocessor/README.md @@ -8,7 +8,7 @@ | Distributions | [contrib], [k8s] | | Issues | [![Open 
issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aprocessor%2Fresourcedetection%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aprocessor%2Fresourcedetection) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aprocessor%2Fresourcedetection%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aprocessor%2Fresourcedetection) | | Code coverage | [![codecov](https://codecov.io/github/open-telemetry/opentelemetry-collector-contrib/graph/main/badge.svg?component=processor_resourcedetection)](https://app.codecov.io/gh/open-telemetry/opentelemetry-collector-contrib/tree/main/?components%5B0%5D=processor_resourcedetection&displayType=list) | -| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@Aneurysm9](https://www.github.com/Aneurysm9), [@dashpole](https://www.github.com/dashpole) | +| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@Aneurysm9](https://www.github.com/Aneurysm9), [@dashpole](https://www.github.com/dashpole) \| Seeking more code owners! 
| [development]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#development [beta]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#beta diff --git a/processor/resourcedetectionprocessor/metadata.yaml b/processor/resourcedetectionprocessor/metadata.yaml index 9e98d1f5cbd70..20486ac54c082 100644 --- a/processor/resourcedetectionprocessor/metadata.yaml +++ b/processor/resourcedetectionprocessor/metadata.yaml @@ -8,3 +8,4 @@ status: distributions: [contrib, k8s] codeowners: active: [Aneurysm9, dashpole] + seeking_new: true From adacec40e2229c082421d7265459040e0f49d44b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 26 Nov 2025 09:50:50 -0800 Subject: [PATCH 14/41] chore(deps): update github-actions deps to v6 (major) (#44537) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [actions/checkout](https://redirect.github.com/actions/checkout) | action | major | `v5` -> `v6` | | [actions/checkout](https://redirect.github.com/actions/checkout) | action | major | `v5.0.1` -> `v6.0.0` | --- > [!WARNING] > Some dependencies could not be looked up. Check the Dependency Dashboard for more information. --- ### Release Notes
actions/checkout (actions/checkout) ### [`v6`](https://redirect.github.com/actions/checkout/compare/v5...v6) [Compare Source](https://redirect.github.com/actions/checkout/compare/v5...v6)
--- ### Configuration πŸ“… **Schedule**: Branch creation - "on tuesday" (UTC), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. β™» **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. πŸ”• **Ignore**: Close this PR and you won't be reminded about these updates again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/open-telemetry/opentelemetry-collector-contrib). Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/workflows/add-codeowners-to-pr.yml | 2 +- .github/workflows/add-labels.yml | 2 +- .../workflows/auto-update-jmx-component.yml | 8 ++--- .github/workflows/build-and-test-arm.yml | 6 ++-- .github/workflows/build-and-test-darwin.yaml | 6 ++-- .github/workflows/build-and-test-windows.yml | 8 ++--- .github/workflows/build-and-test.yml | 34 +++++++++---------- .github/workflows/changelog.yml | 2 +- .github/workflows/check-codeowners.yaml | 4 +-- .github/workflows/check-links.yaml | 4 +-- .github/workflows/check-lychee-config.yml | 2 +- .github/workflows/check-merge-freeze.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/e2e-tests-windows.yml | 6 ++-- .github/workflows/e2e-tests.yml | 8 ++--- .github/workflows/fossa.yml | 2 +- .../workflows/generate-component-labels.yml | 2 +- .github/workflows/generate-weekly-report.yml | 2 +- .github/workflows/golden.yml | 6 ++-- .github/workflows/lint-workflow-files.yml | 2 +- .github/workflows/load-tests.yml | 6 ++-- .github/workflows/mark-issues-as-stale.yml | 2 +- .github/workflows/ping-codeowners-issues.yml | 2 +- .../ping-codeowners-on-new-issue.yml | 2 +- .github/workflows/ping-codeowners-prs.yml | 2 +- .github/workflows/prepare-release.yml | 4 +-- .../workflows/prometheus-compliance-tests.yml 
| 4 +-- .github/workflows/scoped-test.yaml | 4 +-- .github/workflows/scorecard.yml | 2 +- .github/workflows/shellcheck.yaml | 2 +- .github/workflows/telemetrygen.yml | 6 ++-- .github/workflows/tidy-dependencies.yml | 2 +- .github/workflows/update-otel.yaml | 4 +-- 33 files changed, 76 insertions(+), 76 deletions(-) diff --git a/.github/workflows/add-codeowners-to-pr.yml b/.github/workflows/add-codeowners-to-pr.yml index 32f35621af7bd..744134180115e 100644 --- a/.github/workflows/add-codeowners-to-pr.yml +++ b/.github/workflows/add-codeowners-to-pr.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-24.04 if: ${{ github.actor != 'dependabot[bot]' && github.repository_owner == 'open-telemetry' && github.event.pull_request.draft == false }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - name: Run add-codeowners-to-pr.sh run: ./.github/workflows/scripts/add-codeowners-to-pr.sh diff --git a/.github/workflows/add-labels.yml b/.github/workflows/add-labels.yml index d196693fc1ab9..4ae9bfaffa7ea 100644 --- a/.github/workflows/add-labels.yml +++ b/.github/workflows/add-labels.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - name: Run update permissions run: chmod +x ./.github/workflows/scripts/add-labels.sh diff --git a/.github/workflows/auto-update-jmx-component.yml b/.github/workflows/auto-update-jmx-component.yml index 747b07a861d4f..1d1f2aa61896d 100644 --- a/.github/workflows/auto-update-jmx-component.yml +++ b/.github/workflows/auto-update-jmx-component.yml @@ -18,7 +18,7 @@ jobs: already-added: ${{ steps.check-jmx-metrics-version.outputs.already-added }} already-opened: ${{ steps.check-jmx-metrics-version.outputs.already-opened }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - 
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - id: check-jmx-metrics-version name: Check versions @@ -64,7 +64,7 @@ jobs: needs: - check-jmx-metrics-version steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - name: Update version env: @@ -137,7 +137,7 @@ jobs: already-added: ${{ steps.check-jmx-scraper-version.outputs.already-added }} already-opened: ${{ steps.check-jmx-scraper-version.outputs.already-opened }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - id: check-jmx-scraper-version name: Check versions @@ -183,7 +183,7 @@ jobs: needs: - check-jmx-scraper-version steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - name: Update version env: diff --git a/.github/workflows/build-and-test-arm.yml b/.github/workflows/build-and-test-arm.yml index a14e22dd61e96..36c747a1f5513 100644 --- a/.github/workflows/build-and-test-arm.yml +++ b/.github/workflows/build-and-test-arm.yml @@ -24,7 +24,7 @@ jobs: runs-on: ubuntu-22.04-arm if: ${{ github.actor != 'dependabot[bot]' && (!contains(github.event.pull_request.labels.*.name, 'Skip ARM') || github.event_name == 'push' || github.event_name == 'merge_group') }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -64,7 +64,7 @@ jobs: timeout-minutes: 30 runs-on: ubuntu-22.04-arm steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: 
./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -125,7 +125,7 @@ jobs: permissions: issues: write steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup diff --git a/.github/workflows/build-and-test-darwin.yaml b/.github/workflows/build-and-test-darwin.yaml index fc3ed2789bbd3..dad2f699c1b18 100644 --- a/.github/workflows/build-and-test-darwin.yaml +++ b/.github/workflows/build-and-test-darwin.yaml @@ -31,7 +31,7 @@ jobs: runs-on: ${{ matrix.os }} if: ${{ github.actor != 'dependabot[bot]' && (contains(github.event.pull_request.labels.*.name, 'Run Darwin') || github.event_name == 'push' || github.event_name == 'merge_group') }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -55,7 +55,7 @@ jobs: runs-on: ${{ matrix.os }} timeout-minutes: 120 steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -85,7 +85,7 @@ jobs: timeout-minutes: 30 runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup diff --git 
a/.github/workflows/build-and-test-windows.yml b/.github/workflows/build-and-test-windows.yml index 5725ba1a25897..b412825c15da3 100644 --- a/.github/workflows/build-and-test-windows.yml +++ b/.github/workflows/build-and-test-windows.yml @@ -32,7 +32,7 @@ jobs: os: [ windows-latest, windows-11-arm ] runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -55,7 +55,7 @@ jobs: os: [ windows-latest, windows-11-arm ] runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -107,7 +107,7 @@ jobs: # The gcc installed on the Windows arm64 runner doesn't ship native ARM libraries so we need to disable CGO. 
CGO_ENABLED: ${{ matrix.os == 'windows-11-arm' && '0' || '' }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -182,7 +182,7 @@ jobs: permissions: issues: write steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 7929741ac5bc6..cd01ccc218a71 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -26,7 +26,7 @@ jobs: runs-on: ubuntu-24.04 if: ${{ github.actor != 'dependabot[bot]' }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -44,7 +44,7 @@ jobs: runs-on: ubuntu-24.04 needs: [setup-environment] steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: make genotelcontribcol - name: Check Collector Module Version run: ./.github/workflows/scripts/check-collector-module-version.sh @@ -75,7 +75,7 @@ jobs: runs-on: ubuntu-24.04 needs: [setup-environment] steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: 
actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -132,7 +132,7 @@ jobs: needs: [setup-environment] steps: - name: Checkout Repo - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -152,7 +152,7 @@ jobs: runs-on: ubuntu-24.04 needs: [setup-environment] steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -241,7 +241,7 @@ jobs: runs-on: ${{ matrix.runner }} needs: [setup-environment] steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -320,7 +320,7 @@ jobs: runs-on: ubuntu-24.04 needs: [unittest] steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 with: merge-multiple: true @@ -353,7 +353,7 @@ jobs: runs-on: ubuntu-24.04 needs: [setup-environment] steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -383,7 +383,7 @@ jobs: runs-on: ubuntu-24.04 needs: [setup-environment] steps: - - uses: 
actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -423,7 +423,7 @@ jobs: runs-on: ubuntu-24.04 needs: [setup-environment] steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -443,7 +443,7 @@ jobs: runs-on: ubuntu-24.04 needs: [setup-environment] steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -464,7 +464,7 @@ jobs: runs-on: ubuntu-24.04 needs: [setup-environment] steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - run: make genotelcontribcol - name: Build Examples @@ -513,7 +513,7 @@ jobs: - os: windows arch: s390x steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -541,7 +541,7 @@ jobs: runs-on: ubuntu-24.04 needs: [lint, unittest, integration-tests] steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 with: sparse-checkout: | 
.github/workflows/scripts/verify-dist-files-exist.sh @@ -559,7 +559,7 @@ jobs: needs: [lint, unittest, integration-tests] if: (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/v')) && github.repository == 'open-telemetry/opentelemetry-collector-contrib' steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - name: Mkdir bin and dist run: | @@ -604,7 +604,7 @@ jobs: permissions: contents: write steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 with: fetch-depth: 0 @@ -689,7 +689,7 @@ jobs: permissions: issues: write steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml index 682ac0e355aff..42c3f56a890ec 100644 --- a/.github/workflows/changelog.yml +++ b/.github/workflows/changelog.yml @@ -30,7 +30,7 @@ jobs: PR_HEAD: ${{ github.event.pull_request.head.sha }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 with: # Fetch complete history depth only if the PR is not a chore. 
fetch-depth: ${{ !contains(github.event.pull_request.labels.*.name, 'dependencies') && !contains(github.event.pull_request.labels.*.name, 'Skip Changelog') && !contains(github.event.pull_request.title, '[chore]') && '0' || '1' }} diff --git a/.github/workflows/check-codeowners.yaml b/.github/workflows/check-codeowners.yaml index f5223bb6608c5..d0e4c759ecd16 100644 --- a/.github/workflows/check-codeowners.yaml +++ b/.github/workflows/check-codeowners.yaml @@ -34,7 +34,7 @@ jobs: runs-on: ubuntu-24.04 if: ${{ github.actor != 'dependabot[bot]' && github.repository == 'open-telemetry/opentelemetry-collector-contrib' }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -46,7 +46,7 @@ jobs: run: | make install-tools - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 with: ref: ${{github.event.pull_request.head.ref}} repository: ${{github.event.pull_request.head.repo.full_name}} diff --git a/.github/workflows/check-links.yaml b/.github/workflows/check-links.yaml index f041ac45313fc..d16c94acc164b 100644 --- a/.github/workflows/check-links.yaml +++ b/.github/workflows/check-links.yaml @@ -21,7 +21,7 @@ jobs: outputs: files: ${{ steps.changes.outputs.files }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 with: fetch-depth: 0 - name: Get changed files @@ -40,7 +40,7 @@ jobs: needs: changedfiles if: ${{needs.changedfiles.outputs.files}} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 with: fetch-depth: 0 diff --git 
a/.github/workflows/check-lychee-config.yml b/.github/workflows/check-lychee-config.yml index a0ea9fe5b3eeb..b0e93532ca029 100644 --- a/.github/workflows/check-lychee-config.yml +++ b/.github/workflows/check-lychee-config.yml @@ -16,7 +16,7 @@ jobs: runs-on: ubuntu-latest name: Validate Lychee Config steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - name: Lychee Config Checker id: lychee diff --git a/.github/workflows/check-merge-freeze.yml b/.github/workflows/check-merge-freeze.yml index 8510ce4ef8524..03942c5e29c08 100644 --- a/.github/workflows/check-merge-freeze.yml +++ b/.github/workflows/check-merge-freeze.yml @@ -25,7 +25,7 @@ jobs: (!(github.event.pull_request.user.login == 'otelbot[bot]' || github.event.merge_group.head_commit.author.login == 'otelbot[bot]')) runs-on: ubuntu-latest steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: sparse-checkout: .github/workflows/scripts - run: ./.github/workflows/scripts/check-merge-freeze.sh diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 08a59dff13d93..4ba0dd0391404 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -23,7 +23,7 @@ jobs: CODEQL_EXTRACTOR_GO_BUILD_TRACING: "on" steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup diff --git a/.github/workflows/e2e-tests-windows.yml b/.github/workflows/e2e-tests-windows.yml index 1b6b4ad0a82d9..c2512691b2ac9 100644 --- a/.github/workflows/e2e-tests-windows.yml +++ b/.github/workflows/e2e-tests-windows.yml @@ -32,7 +32,7 @@ 
jobs: if: ${{ github.actor != 'dependabot[bot]' && (contains(github.event.pull_request.labels.*.name, 'Run Windows') || github.event_name == 'push' || github.event_name == 'merge_group') }} steps: - name: Checkout - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -68,7 +68,7 @@ jobs: CGO_ENABLED: ${{ matrix.os == 'windows-11-arm' && '0' || '' }} needs: [collector-build] steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -102,7 +102,7 @@ jobs: needs: [collector-build] steps: - name: Checkout Repo - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup diff --git a/.github/workflows/e2e-tests.yml b/.github/workflows/e2e-tests.yml index de7eef49414dd..0fe93c46f54a9 100644 --- a/.github/workflows/e2e-tests.yml +++ b/.github/workflows/e2e-tests.yml @@ -26,7 +26,7 @@ jobs: if: ${{ github.actor != 'dependabot[bot]' }} steps: - name: Checkout - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -51,7 +51,7 @@ jobs: runs-on: ubuntu-24.04 needs: collector-build steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: 
actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -77,7 +77,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -120,7 +120,7 @@ jobs: runs-on: ubuntu-24.04 needs: docker-build steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup diff --git a/.github/workflows/fossa.yml b/.github/workflows/fossa.yml index d9764f272b00c..518dd706f74f6 100644 --- a/.github/workflows/fossa.yml +++ b/.github/workflows/fossa.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest if: github.repository == 'open-telemetry/opentelemetry-collector-contrib' steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - uses: fossas/fossa-action@3ebcea1862c6ffbd5cf1b4d0bd6b3fe7bd6f2cac # v1.7.0 with: diff --git a/.github/workflows/generate-component-labels.yml b/.github/workflows/generate-component-labels.yml index b519a4b18a14a..a47f102ce61cb 100644 --- a/.github/workflows/generate-component-labels.yml +++ b/.github/workflows/generate-component-labels.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-24.04 if: ${{ github.repository_owner == 'open-telemetry' }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - name: Generate component 
labels run: ./.github/workflows/scripts/generate-component-labels.sh diff --git a/.github/workflows/generate-weekly-report.yml b/.github/workflows/generate-weekly-report.yml index df842075f79ae..eca7400bdf978 100644 --- a/.github/workflows/generate-weekly-report.yml +++ b/.github/workflows/generate-weekly-report.yml @@ -18,7 +18,7 @@ jobs: runs-on: ubuntu-24.04 if: ${{ github.repository_owner == 'open-telemetry' }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: | cp ../configs/generate-weekly-report/package.json . cp ../configs/generate-weekly-report/package-lock.json . diff --git a/.github/workflows/golden.yml b/.github/workflows/golden.yml index 27572cc340794..c6ac567052e74 100644 --- a/.github/workflows/golden.yml +++ b/.github/workflows/golden.yml @@ -27,7 +27,7 @@ jobs: runs-on: ubuntu-24.04 if: ${{ github.actor != 'dependabot[bot]' }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -63,7 +63,7 @@ jobs: permissions: packages: write steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -105,7 +105,7 @@ jobs: permissions: packages: write steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup diff --git 
a/.github/workflows/lint-workflow-files.yml b/.github/workflows/lint-workflow-files.yml index a9a6982f682fd..f8a59a05ad3bb 100644 --- a/.github/workflows/lint-workflow-files.yml +++ b/.github/workflows/lint-workflow-files.yml @@ -19,7 +19,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup diff --git a/.github/workflows/load-tests.yml b/.github/workflows/load-tests.yml index f52dea25e971d..f8c09375903de 100644 --- a/.github/workflows/load-tests.yml +++ b/.github/workflows/load-tests.yml @@ -28,7 +28,7 @@ jobs: outputs: loadtest_matrix: ${{ steps.splitloadtest.outputs.loadtest_matrix }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -60,7 +60,7 @@ jobs: fail-fast: false matrix: ${{ fromJson(needs.setup-environment.outputs.loadtest_matrix) }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -117,7 +117,7 @@ jobs: contents: write if: github.event_name != 'pull_request' steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 with: pattern: benchmark-results-* diff --git a/.github/workflows/mark-issues-as-stale.yml 
b/.github/workflows/mark-issues-as-stale.yml index 5cef9e1ff38fb..c84318ce54d16 100644 --- a/.github/workflows/mark-issues-as-stale.yml +++ b/.github/workflows/mark-issues-as-stale.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-24.04 if: ${{ github.repository_owner == 'open-telemetry' }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - name: Run mark-issues-as-stale.sh run: ./.github/workflows/scripts/mark-issues-as-stale.sh diff --git a/.github/workflows/ping-codeowners-issues.yml b/.github/workflows/ping-codeowners-issues.yml index 266bf315e9270..bb52cc342e3f7 100644 --- a/.github/workflows/ping-codeowners-issues.yml +++ b/.github/workflows/ping-codeowners-issues.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-24.04 if: ${{ github.repository_owner == 'open-telemetry' }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - name: Run ping-codeowners-issues.sh run: ./.github/workflows/scripts/ping-codeowners-issues.sh diff --git a/.github/workflows/ping-codeowners-on-new-issue.yml b/.github/workflows/ping-codeowners-on-new-issue.yml index b268104cacc01..6bd1ea69025e4 100644 --- a/.github/workflows/ping-codeowners-on-new-issue.yml +++ b/.github/workflows/ping-codeowners-on-new-issue.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-24.04 if: ${{ github.repository_owner == 'open-telemetry' }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - name: Run ping-codeowners-on-new-issue.sh run: ./.github/workflows/scripts/ping-codeowners-on-new-issue.sh diff --git a/.github/workflows/ping-codeowners-prs.yml b/.github/workflows/ping-codeowners-prs.yml index 914dd55803c59..8bcb5736036f3 100644 --- a/.github/workflows/ping-codeowners-prs.yml +++ 
b/.github/workflows/ping-codeowners-prs.yml @@ -14,7 +14,7 @@ jobs: runs-on: ubuntu-24.04 if: ${{ github.actor != 'dependabot[bot]' && github.repository_owner == 'open-telemetry' && github.event.pull_request.draft == false }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - name: Run ping-codeowners-prs.sh run: ./.github/workflows/scripts/ping-codeowners-prs.sh diff --git a/.github/workflows/prepare-release.yml b/.github/workflows/prepare-release.yml index ce3b3b21453df..a2ce3c28f222d 100644 --- a/.github/workflows/prepare-release.yml +++ b/.github/workflows/prepare-release.yml @@ -21,11 +21,11 @@ jobs: contents: write # required for pushing changes in release-prepare-release.sh runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 with: repository: "open-telemetry/opentelemetry-collector" path: opentelemetry-collector - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 with: path: opentelemetry-collector-contrib - run: opentelemetry-collector-contrib/.github/workflows/scripts/free-disk-space.sh diff --git a/.github/workflows/prometheus-compliance-tests.yml b/.github/workflows/prometheus-compliance-tests.yml index c40234ea4ef19..8502c23bcba53 100644 --- a/.github/workflows/prometheus-compliance-tests.yml +++ b/.github/workflows/prometheus-compliance-tests.yml @@ -29,7 +29,7 @@ jobs: runs-on: ubuntu-24.04 if: ${{ github.actor != 'dependabot[bot]' }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 with: path: opentelemetry-collector-contrib - run: opentelemetry-collector-contrib/.github/workflows/scripts/free-disk-space.sh @@ -51,7 +51,7 @@ jobs: - 
run: make otelcontribcol working-directory: opentelemetry-collector-contrib - name: Checkout compliance repo - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 with: repository: prometheus/compliance path: compliance diff --git a/.github/workflows/scoped-test.yaml b/.github/workflows/scoped-test.yaml index 5a0472a7b5392..7f37d1cc4b84a 100644 --- a/.github/workflows/scoped-test.yaml +++ b/.github/workflows/scoped-test.yaml @@ -16,7 +16,7 @@ jobs: go_sources: ${{ steps.changes.outputs.go_sources }} go_tests: ${{ steps.changes.outputs.go_tests }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 with: fetch-depth: 0 @@ -68,7 +68,7 @@ jobs: run: | echo "go_sources: ${{ needs.changedfiles.outputs.go_sources }}" echo "go_tests: ${{ needs.changedfiles.outputs.go_tests }}" - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 155c0cece3509..a09d0890ed58f 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -30,7 +30,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: persist-credentials: false diff --git a/.github/workflows/shellcheck.yaml b/.github/workflows/shellcheck.yaml index 9fe35bf72807e..ad512e369ea21 100644 --- a/.github/workflows/shellcheck.yaml +++ b/.github/workflows/shellcheck.yaml @@ -14,7 +14,7 @@ jobs: env: VERSION: v0.10.0 steps: - - uses: 
actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - name: shellcheck workflow-scripts uses: ludeeus/action-shellcheck@00cae500b08a931fb5698e11e79bfbd38e612a38 # 2.0.0 env: diff --git a/.github/workflows/telemetrygen.yml b/.github/workflows/telemetrygen.yml index 139f8747f7cea..8775388a020fe 100644 --- a/.github/workflows/telemetrygen.yml +++ b/.github/workflows/telemetrygen.yml @@ -27,7 +27,7 @@ jobs: runs-on: ubuntu-24.04 if: ${{ github.actor != 'dependabot[bot]' }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -67,7 +67,7 @@ jobs: permissions: packages: write steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup @@ -113,7 +113,7 @@ jobs: permissions: packages: write steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 - run: ./.github/workflows/scripts/free-disk-space.sh - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6 id: go-setup diff --git a/.github/workflows/tidy-dependencies.yml b/.github/workflows/tidy-dependencies.yml index 0084f8783e23d..46dedf02e8132 100644 --- a/.github/workflows/tidy-dependencies.yml +++ b/.github/workflows/tidy-dependencies.yml @@ -21,7 +21,7 @@ jobs: runs-on: ubuntu-24.04 if: ${{ !contains(github.event.pull_request.labels.*.name, 'dependency-major-update') && (github.actor == 'renovate[bot]' || contains(github.event.pull_request.labels.*.name, 
'renovatebot')) }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 with: ref: ${{ github.head_ref }} - run: ./.github/workflows/scripts/free-disk-space.sh diff --git a/.github/workflows/update-otel.yaml b/.github/workflows/update-otel.yaml index 86132fc78f0c5..eaf7c47ac8942 100644 --- a/.github/workflows/update-otel.yaml +++ b/.github/workflows/update-otel.yaml @@ -15,11 +15,11 @@ jobs: runs-on: ubuntu-24.04 if: ${{ github.repository_owner == 'open-telemetry' }} steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 with: path: opentelemetry-collector-contrib - name: Pull the latest collector repo - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6 with: path: opentelemetry-collector repository: open-telemetry/opentelemetry-collector From 6ea62909b11695e11c2216b95424588079df5944 Mon Sep 17 00:00:00 2001 From: Christos Markou Date: Thu, 27 Nov 2025 00:18:45 +0200 Subject: [PATCH 15/41] [chore][k8sattributes] Remove unused Name field of Association struct (#44547) Follow up from https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/43939#discussion_r2561053316. The `Association.Name` field is not used anymore and can be removed. Note: It's inside the `internal/kube` pkg hence it's not a breaking change. 
Signed-off-by: ChrsMark --- .../internal/kube/client_test.go | 1 - processor/k8sattributesprocessor/internal/kube/kube.go | 1 - processor/k8sattributesprocessor/processor_test.go | 10 ---------- 3 files changed, 12 deletions(-) diff --git a/processor/k8sattributesprocessor/internal/kube/client_test.go b/processor/k8sattributesprocessor/internal/kube/client_test.go index 8e6eb6d1b37e5..d29ef1cf490f0 100644 --- a/processor/k8sattributesprocessor/internal/kube/client_test.go +++ b/processor/k8sattributesprocessor/internal/kube/client_test.go @@ -371,7 +371,6 @@ func TestPodCreate(t *testing.T) { func TestPodAddOutOfSync(t *testing.T) { c, _ := newTestClient(t) c.Associations = append(c.Associations, Association{ - Name: "name", Sources: []AssociationSource{ { From: ResourceSource, diff --git a/processor/k8sattributesprocessor/internal/kube/kube.go b/processor/k8sattributesprocessor/internal/kube/kube.go index 0472ddd545f43..7afa865852bd9 100644 --- a/processor/k8sattributesprocessor/internal/kube/kube.go +++ b/processor/k8sattributesprocessor/internal/kube/kube.go @@ -399,7 +399,6 @@ type Associations struct { // Association represents one association rule type Association struct { - Name string Sources []AssociationSource } diff --git a/processor/k8sattributesprocessor/processor_test.go b/processor/k8sattributesprocessor/processor_test.go index 0c3a8bf445b20..e2a09bb91a187 100644 --- a/processor/k8sattributesprocessor/processor_test.go +++ b/processor/k8sattributesprocessor/processor_test.go @@ -629,7 +629,6 @@ func TestIPSourceWithPodAssociation(t *testing.T) { m.kubernetesProcessorOperation(func(kp *kubernetesprocessor) { kp.podAssociations = []kube.Association{ { - Name: "k8s.pod.ip", Sources: []kube.AssociationSource{ { From: "resource_attribute", @@ -638,7 +637,6 @@ func TestIPSourceWithPodAssociation(t *testing.T) { }, }, { - Name: "k8s.pod.ip", Sources: []kube.AssociationSource{ { From: "resource_attribute", @@ -647,7 +645,6 @@ func 
TestIPSourceWithPodAssociation(t *testing.T) { }, }, { - Name: "k8s.pod.ip", Sources: []kube.AssociationSource{ { From: "resource_attribute", @@ -1085,7 +1082,6 @@ func TestProcessorAddContainerAttributes(t *testing.T) { op: func(kp *kubernetesprocessor) { kp.podAssociations = []kube.Association{ { - Name: "k8s.pod.uid", Sources: []kube.AssociationSource{ { From: "resource_attribute", @@ -1126,7 +1122,6 @@ func TestProcessorAddContainerAttributes(t *testing.T) { op: func(kp *kubernetesprocessor) { kp.podAssociations = []kube.Association{ { - Name: "k8s.pod.uid", Sources: []kube.AssociationSource{ { From: "resource_attribute", @@ -1164,7 +1159,6 @@ func TestProcessorAddContainerAttributes(t *testing.T) { op: func(kp *kubernetesprocessor) { kp.podAssociations = []kube.Association{ { - Name: "k8s.pod.uid", Sources: []kube.AssociationSource{ { From: "resource_attribute", @@ -1214,7 +1208,6 @@ func TestProcessorAddContainerAttributes(t *testing.T) { op: func(kp *kubernetesprocessor) { kp.podAssociations = []kube.Association{ { - Name: "k8s.pod.uid", Sources: []kube.AssociationSource{ { From: "resource_attribute", @@ -1387,7 +1380,6 @@ func TestProcessorAddContainerAttributes(t *testing.T) { op: func(kp *kubernetesprocessor) { kp.podAssociations = []kube.Association{ { - Name: "k8s.pod.uid", Sources: []kube.AssociationSource{ { From: "resource_attribute", @@ -1423,7 +1415,6 @@ func TestProcessorAddContainerAttributes(t *testing.T) { op: func(kp *kubernetesprocessor) { kp.podAssociations = []kube.Association{ { - Name: "k8s.pod.uid", Sources: []kube.AssociationSource{ { From: "resource_attribute", @@ -1506,7 +1497,6 @@ func TestProcessorPicksUpPassthroughPodIp(t *testing.T) { m.kubernetesProcessorOperation(func(kp *kubernetesprocessor) { kp.podAssociations = []kube.Association{ { - Name: "k8s.pod.ip", Sources: []kube.AssociationSource{ { From: "resource_attribute", From 5a5730102547046beec49e6bc7f050aba6e0541a Mon Sep 17 00:00:00 2001 From: Andrew Wilkins Date: Thu, 27 
Nov 2025 15:23:46 +0800 Subject: [PATCH 16/41] [chore] [exporter/elasticsearch] fix integration tests (#44566) #### Description The integration tests are broken, because they use otelcol but are not supplying a telemetry factory. #### Link to tracking issue Fixes https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/44311 #### Testing Tests pass locally, and are actually sending non-zero workload now. #### Documentation N/A --- exporter/elasticsearchexporter/integrationtest/collector.go | 2 ++ exporter/elasticsearchexporter/integrationtest/go.mod | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/exporter/elasticsearchexporter/integrationtest/collector.go b/exporter/elasticsearchexporter/integrationtest/collector.go index 3905159be542c..bff83432df60e 100644 --- a/exporter/elasticsearchexporter/integrationtest/collector.go +++ b/exporter/elasticsearchexporter/integrationtest/collector.go @@ -25,6 +25,7 @@ import ( "go.opentelemetry.io/collector/processor" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/otlpreceiver" + "go.opentelemetry.io/collector/service/telemetry/otelconftelemetry" "golang.org/x/sync/errgroup" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/elasticsearchexporter" @@ -148,6 +149,7 @@ func newRecreatableOtelCol(tb testing.TB) *recreatableOtelCol { debugexporter.NewFactory(), ) require.NoError(tb, err) + factories.Telemetry = otelconftelemetry.NewFactory() return &recreatableOtelCol{ tempDir: testutil.TempDir(tb), factories: factories, diff --git a/exporter/elasticsearchexporter/integrationtest/go.mod b/exporter/elasticsearchexporter/integrationtest/go.mod index 4eb7b8933263b..48d2ccb3ad031 100644 --- a/exporter/elasticsearchexporter/integrationtest/go.mod +++ b/exporter/elasticsearchexporter/integrationtest/go.mod @@ -29,6 +29,7 @@ require ( go.opentelemetry.io/collector/receiver v1.46.1-0.20251120204106-2e9c82787618 
go.opentelemetry.io/collector/receiver/otlpreceiver v0.140.1-0.20251120204106-2e9c82787618 go.opentelemetry.io/collector/receiver/receivertest v0.140.1-0.20251120204106-2e9c82787618 + go.opentelemetry.io/collector/service v0.140.1-0.20251120204106-2e9c82787618 go.uber.org/zap v1.27.1 golang.org/x/sync v0.18.0 ) @@ -207,7 +208,6 @@ require ( go.opentelemetry.io/collector/processor/xprocessor v0.140.1-0.20251120204106-2e9c82787618 // indirect go.opentelemetry.io/collector/receiver/receiverhelper v0.140.1-0.20251120204106-2e9c82787618 // indirect go.opentelemetry.io/collector/receiver/xreceiver v0.140.1-0.20251120204106-2e9c82787618 // indirect - go.opentelemetry.io/collector/service v0.140.1-0.20251120204106-2e9c82787618 // indirect go.opentelemetry.io/collector/service/hostcapabilities v0.140.1-0.20251120204106-2e9c82787618 // indirect go.opentelemetry.io/contrib/bridges/otelzap v0.13.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 // indirect From 54a7fb31d4d62e1a5336fcf4166affba7a339b0d Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 26 Nov 2025 23:37:42 -0800 Subject: [PATCH 17/41] fix(deps): update module github.com/snowflakedb/gosnowflake to v1.18.0 (#44530) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Change | Age | Confidence | |---|---|---|---| | [github.com/snowflakedb/gosnowflake](https://redirect.github.com/snowflakedb/gosnowflake) | `v1.17.0` -> `v1.18.0` | [![age](https://developer.mend.io/api/mc/badges/age/go/github.com%2fsnowflakedb%2fgosnowflake/v1.18.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/github.com%2fsnowflakedb%2fgosnowflake/v1.17.0/v1.18.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | --- > [!WARNING] > Some dependencies could not be 
looked up. Check the Dependency Dashboard for more information. --- ### Release Notes
snowflakedb/gosnowflake (github.com/snowflakedb/gosnowflake) ### [`v1.18.0`](https://redirect.github.com/snowflakedb/gosnowflake/releases/tag/v1.18.0): Release [Compare Source](https://redirect.github.com/snowflakedb/gosnowflake/compare/v1.17.1...v1.18.0) - Please check Snowflake [Go Snowflake for release notes](https://docs.snowflake.com/en/release-notes/clients-drivers/golang). ### [`v1.17.1`](https://redirect.github.com/snowflakedb/gosnowflake/releases/tag/v1.17.1): Release [Compare Source](https://redirect.github.com/snowflakedb/gosnowflake/compare/v1.17.0...v1.17.1) - Please check Snowflake [Go Snowflake for release notes](https://docs.snowflake.com/en/release-notes/clients-drivers/golang).
--- ### Configuration πŸ“… **Schedule**: Branch creation - "on tuesday" (UTC), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. β™» **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. πŸ”• **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/open-telemetry/opentelemetry-collector-contrib). --------- Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: otelbot <197425009+otelbot@users.noreply.github.com> --- receiver/snowflakereceiver/go.mod | 2 +- receiver/snowflakereceiver/go.sum | 4 ++-- receiver/sqlqueryreceiver/go.mod | 2 +- receiver/sqlqueryreceiver/go.sum | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/receiver/snowflakereceiver/go.mod b/receiver/snowflakereceiver/go.mod index a9bba420b0a9e..ea36814a0f26f 100644 --- a/receiver/snowflakereceiver/go.mod +++ b/receiver/snowflakereceiver/go.mod @@ -7,7 +7,7 @@ require ( github.com/google/go-cmp v0.7.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.140.1 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.140.1 - github.com/snowflakedb/gosnowflake v1.17.0 + github.com/snowflakedb/gosnowflake v1.18.0 github.com/stretchr/testify v1.11.1 go.opentelemetry.io/collector/component v1.46.1-0.20251120204106-2e9c82787618 go.opentelemetry.io/collector/component/componenttest v0.140.1-0.20251120204106-2e9c82787618 diff --git a/receiver/snowflakereceiver/go.sum b/receiver/snowflakereceiver/go.sum index 981c00e353c43..7950d63595b47 100644 --- a/receiver/snowflakereceiver/go.sum +++ b/receiver/snowflakereceiver/go.sum @@ -159,8 +159,8 @@ github.com/rogpeppe/go-internal v1.14.1 
h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0t github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/snowflakedb/gosnowflake v1.17.0 h1:be50vC0buiOitvneyRHiqNkvPMcunGD3EcTnL2zYATg= -github.com/snowflakedb/gosnowflake v1.17.0/go.mod h1:TaHvQGh9MA2lopZZMm1AvvENDfwcnKtuskIr1e6Fpic= +github.com/snowflakedb/gosnowflake v1.18.0 h1:DfTuV8mPGIf9PTR8fw0eBQtKYwg2hYenFFHD8/Gz63w= +github.com/snowflakedb/gosnowflake v1.18.0/go.mod h1:7D4+cLepOWrerVsH+tevW3zdMJ5/WrEN7ZceAC6xBv0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= diff --git a/receiver/sqlqueryreceiver/go.mod b/receiver/sqlqueryreceiver/go.mod index 22411f4905bb2..d17fc56c4a3bd 100644 --- a/receiver/sqlqueryreceiver/go.mod +++ b/receiver/sqlqueryreceiver/go.mod @@ -30,7 +30,7 @@ require ( github.com/lib/pq v1.10.9 github.com/microsoft/go-mssqldb v1.9.4 github.com/sijms/go-ora/v2 v2.9.0 - github.com/snowflakedb/gosnowflake v1.17.0 + github.com/snowflakedb/gosnowflake v1.18.0 github.com/thda/tds v0.1.7 go.opentelemetry.io/collector/component/componenttest v0.140.1-0.20251120204106-2e9c82787618 go.opentelemetry.io/collector/confmap/xconfmap v0.140.1-0.20251120204106-2e9c82787618 diff --git a/receiver/sqlqueryreceiver/go.sum b/receiver/sqlqueryreceiver/go.sum index 599ab1a672281..3fb43270b2364 100644 --- a/receiver/sqlqueryreceiver/go.sum +++ b/receiver/sqlqueryreceiver/go.sum @@ -277,8 +277,8 @@ github.com/sijms/go-ora/v2 v2.9.0 h1:+iQbUeTeCOFMb5BsOMgUhV8KWyrv9yjKpcK4x7+MFrg github.com/sijms/go-ora/v2 v2.9.0/go.mod h1:QgFInVi3ZWyqAiJwzBQA+nbKYKH77tdp1PYoCqhR2dU= github.com/sirupsen/logrus v1.9.3 
h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/snowflakedb/gosnowflake v1.17.0 h1:be50vC0buiOitvneyRHiqNkvPMcunGD3EcTnL2zYATg= -github.com/snowflakedb/gosnowflake v1.17.0/go.mod h1:TaHvQGh9MA2lopZZMm1AvvENDfwcnKtuskIr1e6Fpic= +github.com/snowflakedb/gosnowflake v1.18.0 h1:DfTuV8mPGIf9PTR8fw0eBQtKYwg2hYenFFHD8/Gz63w= +github.com/snowflakedb/gosnowflake v1.18.0/go.mod h1:7D4+cLepOWrerVsH+tevW3zdMJ5/WrEN7ZceAC6xBv0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= From 31a8be2b6d4ed835403e5678b0ae149b91814598 Mon Sep 17 00:00:00 2001 From: Caleb Hurshman Date: Thu, 27 Nov 2025 02:40:20 -0500 Subject: [PATCH 18/41] feat: Add macOS Unified Logging receiver implementation (#44434) #### Description Second PR for the `macosunifiedloggingreceiver`, adds the actual receiver implementation and unit tests. #### Link to tracking issue #44089 #### Testing Added unit tests from downstream - confirmed all passing. Temporarily added the receiver to `builder-config.yaml`, confirmed the collector built and the receiver ran successfully. 
--- ...unifiedloggingreceiver-implementation.yaml | 27 + .../config_common.go | 3 +- .../config_test.go | 184 +++-- receiver/macosunifiedloggingreceiver/go.mod | 1 + receiver/macosunifiedloggingreceiver/go.sum | 2 + .../macosunifiedloggingreceiver/receiver.go | 53 -- .../receiver_darwin.go | 338 ++++++++++ .../receiver_test.go | 627 ++++++++++++++++++ 8 files changed, 1135 insertions(+), 100 deletions(-) create mode 100644 .chloggen/feat_macosunifiedloggingreceiver-implementation.yaml delete mode 100644 receiver/macosunifiedloggingreceiver/receiver.go create mode 100644 receiver/macosunifiedloggingreceiver/receiver_darwin.go create mode 100644 receiver/macosunifiedloggingreceiver/receiver_test.go diff --git a/.chloggen/feat_macosunifiedloggingreceiver-implementation.yaml b/.chloggen/feat_macosunifiedloggingreceiver-implementation.yaml new file mode 100644 index 0000000000000..c9e30191b30f6 --- /dev/null +++ b/.chloggen/feat_macosunifiedloggingreceiver-implementation.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: new_component + +# The name of the component, or a single word describing the area of concern, (e.g. receiver/filelog) +component: receiver/macosunifiedlogging + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Implementation of the macOS Unified Logging Receiver. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [44089] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. 
+subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/receiver/macosunifiedloggingreceiver/config_common.go b/receiver/macosunifiedloggingreceiver/config_common.go index e6f08404a16d2..6eb5926e2fdad 100644 --- a/receiver/macosunifiedloggingreceiver/config_common.go +++ b/receiver/macosunifiedloggingreceiver/config_common.go @@ -18,7 +18,8 @@ type Config struct { // resolvedArchivePaths stores the expanded archive paths after glob resolution // This is populated during Validate() and not exposed to users // Only used on darwin platform (see config.go) - //nolint:unused + // + //nolint:unused // only used on darwin platform resolvedArchivePaths []string // Predicate is a filter predicate to pass to the log command diff --git a/receiver/macosunifiedloggingreceiver/config_test.go b/receiver/macosunifiedloggingreceiver/config_test.go index 045e170303811..189bd0036c74a 100644 --- a/receiver/macosunifiedloggingreceiver/config_test.go +++ b/receiver/macosunifiedloggingreceiver/config_test.go @@ -6,7 +6,6 @@ package macosunifiedloggingreceiver import ( - "errors" "os" "path/filepath" "testing" @@ -19,108 +18,201 @@ import ( func TestConfigValidate(t *testing.T) { testCases := []struct { desc string - cfg *Config - expectedErr error + makeCfg func(t *testing.T) *Config + expectedErr string }{ { desc: "valid config - live mode", - cfg: &Config{ - MaxPollInterval: 50 * time.Second, - MaxLogAge: 12 * time.Hour, + makeCfg: func(t *testing.T) *Config { + return &Config{ + MaxPollInterval: 50 * time.Second, + MaxLogAge: 12 * time.Hour, + } }, - expectedErr: nil, }, { 
desc: "invalid archive path - does not exist", - cfg: &Config{ - ArchivePath: "/tmp/test/invalid", + makeCfg: func(t *testing.T) *Config { + return &Config{ + ArchivePath: filepath.Join(t.TempDir(), "missing", "logs.logarchive"), + } }, - expectedErr: errors.New("no such file or directory"), + expectedErr: "no such file or directory", }, { desc: "invalid archive path - not a directory", - cfg: &Config{ - ArchivePath: "./README.md", + makeCfg: func(t *testing.T) *Config { + return &Config{ + ArchivePath: "./README.md", + } + }, + expectedErr: "must be a directory", + }, + { + desc: "archive glob requires at least one match", + makeCfg: func(t *testing.T) *Config { + return &Config{ + ArchivePath: filepath.Join(t.TempDir(), "*.logarchive"), + } + }, + expectedErr: "no archive paths matched the provided pattern", + }, + { + desc: "end time requires archive path", + makeCfg: func(t *testing.T) *Config { + return &Config{ + EndTime: "2024-01-02 00:00:00", + } }, - expectedErr: errors.New("must be a directory"), + expectedErr: "end_time can only be used with archive_path", }, { desc: "valid predicate with AND", - cfg: &Config{ - Predicate: "subsystem == 'com.apple.example' AND messageType == 'Error'", + makeCfg: func(t *testing.T) *Config { + return &Config{ + Predicate: "subsystem == 'com.apple.example' AND messageType == 'Error'", + } }, - expectedErr: nil, }, { desc: "valid predicate with && (normalized to AND)", - cfg: &Config{ - Predicate: "subsystem == 'com.apple.example' && messageType == 'Error'", + makeCfg: func(t *testing.T) *Config { + return &Config{ + Predicate: "subsystem == 'com.apple.example' && messageType == 'Error'", + } }, - expectedErr: nil, }, { desc: "valid predicate with || (normalized to OR)", - cfg: &Config{ - Predicate: "subsystem == 'com.apple.example' || messageType == 'Error'", + makeCfg: func(t *testing.T) *Config { + return &Config{ + Predicate: "subsystem == 'com.apple.example' || messageType == 'Error'", + } }, - expectedErr: nil, }, { 
desc: "valid predicate with comparison operators", - cfg: &Config{ - Predicate: "processID > 100 && processID < 1000", + makeCfg: func(t *testing.T) *Config { + return &Config{ + Predicate: "processID > 100 && processID < 1000", + } }, - expectedErr: nil, }, { desc: "valid predicate with > comparison and spaces", - cfg: &Config{ - Predicate: "processID >100", + makeCfg: func(t *testing.T) *Config { + return &Config{ + Predicate: "processID >100", + } }, - expectedErr: nil, }, { desc: "invalid predicate - semicolon", - cfg: &Config{ - Predicate: "subsystem == 'test'; curl http://evil.com", + makeCfg: func(t *testing.T) *Config { + return &Config{ + Predicate: "subsystem == 'test'; curl http://evil.com", + } }, - expectedErr: errors.New("predicate contains invalid character"), + expectedErr: "predicate contains invalid character", }, { desc: "invalid predicate - pipe", - cfg: &Config{ - Predicate: "subsystem == 'test' | sh", + makeCfg: func(t *testing.T) *Config { + return &Config{ + Predicate: "subsystem == 'test' | sh", + } }, - expectedErr: errors.New("predicate contains invalid character"), + expectedErr: "predicate contains invalid character", }, { desc: "invalid predicate - dollar sign", - cfg: &Config{ - Predicate: "subsystem == '$HOME'", + makeCfg: func(t *testing.T) *Config { + return &Config{ + Predicate: "subsystem == '$HOME'", + } }, - expectedErr: errors.New("predicate contains invalid character"), + expectedErr: "predicate contains invalid character", }, { desc: "invalid predicate - backtick", - cfg: &Config{ - Predicate: "subsystem == '`whoami`'", + makeCfg: func(t *testing.T) *Config { + return &Config{ + Predicate: "subsystem == '`whoami`'", + } }, - expectedErr: errors.New("predicate contains invalid character"), + expectedErr: "predicate contains invalid character", }, { desc: "invalid predicate - append redirect", - cfg: &Config{ - Predicate: "subsystem == 'test' >> /tmp/output", + makeCfg: func(t *testing.T) *Config { + return &Config{ + 
Predicate: "subsystem == 'test' >> /tmp/output", + } + }, + expectedErr: "predicate contains invalid character", + }, + { + desc: "predicate must contain valid field name", + makeCfg: func(t *testing.T) *Config { + return &Config{ + Predicate: "unknownField == 'value'", + } + }, + expectedErr: "predicate must contain at least one valid field name", + }, + { + desc: "predicate must contain operator", + makeCfg: func(t *testing.T) *Config { + return &Config{ + Predicate: "subsystem 'com.apple'", + } + }, + expectedErr: "predicate must contain at least one valid operator", + }, + { + desc: "predicate must contain valid event type when type is referenced", + makeCfg: func(t *testing.T) *Config { + return &Config{ + Predicate: "type == 'invalidEvent'", + } + }, + expectedErr: "predicate must contain at least one valid event type", + }, + { + desc: "predicate must contain valid log type when logType is referenced", + makeCfg: func(t *testing.T) *Config { + return &Config{ + Predicate: "logType == 'invalid'", + } + }, + expectedErr: "predicate must contain at least one valid log type", + }, + { + desc: "predicate must contain valid signpost scope when signpostScope is referenced", + makeCfg: func(t *testing.T) *Config { + return &Config{ + Predicate: "signpostScope == 'invalid'", + } + }, + expectedErr: "predicate must contain at least one valid signpost scope", + }, + { + desc: "predicate must contain valid signpost type when signpostType is referenced", + makeCfg: func(t *testing.T) *Config { + return &Config{ + Predicate: "signpostType == 'invalid'", + } }, - expectedErr: errors.New("predicate contains invalid character"), + expectedErr: "predicate must contain at least one valid signpost type", }, } for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { - err := tc.cfg.Validate() - if tc.expectedErr != nil { - require.ErrorContains(t, err, tc.expectedErr.Error()) + cfg := tc.makeCfg(t) + err := cfg.Validate() + if tc.expectedErr != "" { + 
require.ErrorContains(t, err, tc.expectedErr) } else { require.NoError(t, err) } diff --git a/receiver/macosunifiedloggingreceiver/go.mod b/receiver/macosunifiedloggingreceiver/go.mod index 6cb5775ed4dcb..61597643c796e 100644 --- a/receiver/macosunifiedloggingreceiver/go.mod +++ b/receiver/macosunifiedloggingreceiver/go.mod @@ -18,6 +18,7 @@ require ( ) require ( + github.com/cenkalti/backoff/v4 v4.3.0 github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect diff --git a/receiver/macosunifiedloggingreceiver/go.sum b/receiver/macosunifiedloggingreceiver/go.sum index e6fe00a18708c..e8fe499177f65 100644 --- a/receiver/macosunifiedloggingreceiver/go.sum +++ b/receiver/macosunifiedloggingreceiver/go.sum @@ -1,5 +1,7 @@ github.com/bmatcuk/doublestar/v4 v4.9.1 h1:X8jg9rRZmJd4yRy7ZeNDRnM+T3ZfHv15JiBJ/avrEXE= github.com/bmatcuk/doublestar/v4 v4.9.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/receiver/macosunifiedloggingreceiver/receiver.go b/receiver/macosunifiedloggingreceiver/receiver.go deleted file mode 100644 index e862711e57fe6..0000000000000 --- a/receiver/macosunifiedloggingreceiver/receiver.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package macosunifiedloggingreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/macosunifiedloggingreceiver" - -import ( - "context" - - "go.opentelemetry.io/collector/component" - 
"go.opentelemetry.io/collector/consumer" - "go.uber.org/zap" -) - -// unifiedLoggingReceiver uses exec.Command to run the native macOS `log` command -// -//nolint:unused // only used on darwin platform (see config.go) -type unifiedLoggingReceiver struct { - config *Config - logger *zap.Logger - consumer consumer.Logs - cancel context.CancelFunc -} - -//nolint:unused // only used on darwin platform (see config.go) -func newUnifiedLoggingReceiver( - config *Config, - logger *zap.Logger, - consumer consumer.Logs, -) *unifiedLoggingReceiver { - return &unifiedLoggingReceiver{ - config: config, - logger: logger, - consumer: consumer, - } -} - -//nolint:unused // only used on darwin platform (see config.go) -func (r *unifiedLoggingReceiver) Start(ctx context.Context, _ component.Host) error { - r.logger.Info("Starting macOS unified logging receiver") - - _, cancel := context.WithCancel(ctx) - r.cancel = cancel - return nil -} - -//nolint:unused // only used on darwin platform (see config.go) -func (r *unifiedLoggingReceiver) Shutdown(_ context.Context) error { - r.logger.Info("Shutting down macOS unified logging receiver") - if r.cancel != nil { - r.cancel() - } - return nil -} diff --git a/receiver/macosunifiedloggingreceiver/receiver_darwin.go b/receiver/macosunifiedloggingreceiver/receiver_darwin.go new file mode 100644 index 0000000000000..53e4c11a3a4a8 --- /dev/null +++ b/receiver/macosunifiedloggingreceiver/receiver_darwin.go @@ -0,0 +1,338 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build darwin + +package macosunifiedloggingreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/macosunifiedloggingreceiver" + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "fmt" + "os/exec" + "sync" + "time" + + "github.com/cenkalti/backoff/v4" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/pcommon" + 
"go.opentelemetry.io/collector/pdata/plog" + "go.uber.org/zap" +) + +// unifiedLoggingReceiver uses exec.Command to run the native macOS `log` command +type unifiedLoggingReceiver struct { + config *Config + logger *zap.Logger + consumer consumer.Logs + cancel context.CancelFunc +} + +func newUnifiedLoggingReceiver( + config *Config, + logger *zap.Logger, + consumer consumer.Logs, +) *unifiedLoggingReceiver { + return &unifiedLoggingReceiver{ + config: config, + logger: logger, + consumer: consumer, + } +} + +func (r *unifiedLoggingReceiver) Start(ctx context.Context, _ component.Host) error { + r.logger.Info("Starting macOS unified logging receiver") + + ctx, cancel := context.WithCancel(ctx) + r.cancel = cancel + + // Start reading logs in a goroutine + go r.readLogs(ctx) + + return nil +} + +func (r *unifiedLoggingReceiver) Shutdown(_ context.Context) error { + r.logger.Info("Shutting down macOS unified logging receiver") + if r.cancel != nil { + r.cancel() + } + return nil +} + +// readLogs runs the log command and processes output +func (r *unifiedLoggingReceiver) readLogs(ctx context.Context) { + // Run immediately on startup + if r.config.ArchivePath == "" { + r.readFromLive(ctx) + } else { + r.readFromArchive(ctx) + } +} + +func (r *unifiedLoggingReceiver) readFromArchive(ctx context.Context) { + resolvedPaths := r.config.resolvedArchivePaths + r.logger.Info("Reading from archive mode", zap.Int("archive_count", len(resolvedPaths))) + + wg := &sync.WaitGroup{} + for _, archivePath := range resolvedPaths { + wg.Add(1) + go func(archivePath string) { + defer wg.Done() + r.logger.Info("Processing archive", zap.String("path", archivePath)) + if _, err := r.runLogCommand(ctx, archivePath); err != nil { + r.logger.Error("Failed to run log command for archive", zap.String("archive", archivePath), zap.Error(err)) + return + } + }(archivePath) + } + wg.Wait() + r.logger.Info("Finished reading archive logs") +} + +func (r *unifiedLoggingReceiver) readFromLive(ctx 
context.Context) { + // Run immediately on startup + _, err := r.runLogCommand(ctx, "") + if err != nil { + r.logger.Error("Failed to run log command", zap.Error(err)) + return + } + + // For live mode, use exponential backoff based on whether logs are being actively written + // Configure backoff starting at 100ms, maxing out at MaxPollInterval + expBackoff := backoff.NewExponentialBackOff( + backoff.WithInitialInterval(100*time.Millisecond), + backoff.WithMaxInterval(r.config.MaxPollInterval), + backoff.WithMultiplier(2.0), + backoff.WithMaxElapsedTime(0), // Never stop + ) + expBackoff.Reset() + + ticker := backoff.NewTicker(expBackoff) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + count, err := r.runLogCommand(ctx, "") + if err != nil { + r.logger.Error("Failed to run log command", zap.Error(err)) + } + + // If logs were processed, reset backoff to minimum interval + if count > 0 { + r.logger.Debug("Logs found, resetting backoff to minimum interval", zap.Int("count", count)) + expBackoff.Reset() + } + // If no logs, backoff will continue to increase to MaxPollInterval + } + } +} + +// runLogCommand executes the log command and processes output +// Returns the number of logs processed +// archivePath should be empty string for live mode, or a specific archive path for archive mode +func (r *unifiedLoggingReceiver) runLogCommand(ctx context.Context, archivePath string) (int, error) { + // Build the log command arguments + args := r.buildLogCommandArgs(archivePath) + + r.logger.Info("Running log command", zap.Strings("args", args)) + + // Create the command + cmd := exec.CommandContext(ctx, "log", args...) 
// #nosec G204 - args are controlled by config + + // Get stdout pipe + stdout, err := cmd.StdoutPipe() + if err != nil { + return 0, fmt.Errorf("failed to get stdout pipe: %w", err) + } + + // Start the command + if err := cmd.Start(); err != nil { + return 0, fmt.Errorf("failed to start log command: %w", err) + } + + // Ensure the process is properly cleaned up to avoid zombies + defer func() { + _ = cmd.Wait() + }() + + // Read and process output line by line + scanner := bufio.NewScanner(stdout) + // Set a large buffer size for long log lines + buf := make([]byte, 0, 1024*1024) // 1MB buffer + scanner.Buffer(buf, 10*1024*1024) // 10MB max + + var processedCount int + isFirstLine := true + // Skip the header line in text-based formats (default, syslog, compact) + isTextFormat := r.config.Format == "default" || r.config.Format == "syslog" || r.config.Format == "compact" + for scanner.Scan() { + select { + case <-ctx.Done(): + err := cmd.Process.Kill() + if err != nil { + r.logger.Error("Failed to kill log command", zap.Error(err)) + } + return processedCount, ctx.Err() + default: + line := scanner.Bytes() + if len(line) == 0 { + continue + } + + if isTextFormat && isFirstLine { + isFirstLine = false + continue + } + isFirstLine = false + + // Skip completion/status messages (applies to all formats) + if isCompletionLine(line) { + continue + } + + // Parse and send the log entry + if err := r.processLogLine(ctx, line); err != nil { + r.logger.Warn("Failed to process log line", + zap.Error(err)) + continue + } + processedCount++ + } + } + + if err := scanner.Err(); err != nil { + return processedCount, fmt.Errorf("error reading log output: %w", err) + } + + r.logger.Debug("Processed logs", zap.Int("count", processedCount)) + return processedCount, nil +} + +// buildLogCommandArgs constructs the arguments for the log command +// archivePath should be empty string for live mode, or a specific archive path for archive mode +func (r *unifiedLoggingReceiver) 
buildLogCommandArgs(archivePath string) []string { + args := []string{"show"} + + // Add archive path if specified + if archivePath != "" { + args = append(args, "--archive", archivePath) + } + + // Add style flag if format is not default + if r.config.Format != "" && r.config.Format != "default" { + args = append(args, "--style", r.config.Format) + } + + // Add start time + if r.config.StartTime != "" { + args = append(args, "--start", r.config.StartTime) + } else if r.config.MaxLogAge > 0 && archivePath == "" { + // For live mode, calculate start time from max_log_age + startTime := time.Now().Add(-r.config.MaxLogAge) + args = append(args, "--start", startTime.Format("2006-01-02 15:04:05")) + } + + // Add end time (archive mode only) + if r.config.EndTime != "" && archivePath != "" { + args = append(args, "--end", r.config.EndTime) + } + + // Add predicate filter + if r.config.Predicate != "" { + args = append(args, "--predicate", r.config.Predicate) + } + + return args +} + +// processLogLine processes a log line and sends it to the consumer +func (r *unifiedLoggingReceiver) processLogLine(ctx context.Context, line []byte) error { + // Convert to OTel plog + logs := plog.NewLogs() + resourceLogs := logs.ResourceLogs().AppendEmpty() + scopeLogs := resourceLogs.ScopeLogs().AppendEmpty() + logRecord := scopeLogs.LogRecords().AppendEmpty() + + // Put the entire line into the log body as a string + logRecord.Body().SetStr(string(line)) + logRecord.SetObservedTimestamp(pcommon.NewTimestampFromTime(time.Now())) + + // Parse timestamp and severity when using JSON formats + if r.config.Format == "ndjson" || r.config.Format == "json" { + var logEntry map[string]any + if err := json.Unmarshal(line, &logEntry); err == nil { + // Parse and set timestamp + if ts, ok := logEntry["timestamp"].(string); ok { + if t, err := time.Parse("2006-01-02 15:04:05.000000-0700", ts); err == nil { + logRecord.SetTimestamp(pcommon.NewTimestampFromTime(t)) + } + } + + // Set severity from 
messageType + if msgType, ok := logEntry["messageType"].(string); ok { + logRecord.SetSeverityText(msgType) + logRecord.SetSeverityNumber(mapMessageTypeToSeverity(msgType)) + } + } + } + + // Send to consumer + return r.consumer.ConsumeLogs(ctx, logs) +} + +// mapMessageTypeToSeverity maps log messageType to OTel severity +func mapMessageTypeToSeverity(msgType string) plog.SeverityNumber { + switch msgType { + case "Error": + return plog.SeverityNumberError + case "Fault": + return plog.SeverityNumberFatal + case "Default", "Info": + return plog.SeverityNumberInfo + case "Debug": + return plog.SeverityNumberDebug + default: + return plog.SeverityNumberUnspecified + } +} + +// isCompletionLine checks if a line is a completion/status message from the log command +// These lines should be filtered out (e.g., {"count":540659,"finished":1}) +func isCompletionLine(line []byte) bool { + // Trim whitespace + trimmed := bytes.TrimSpace(line) + + // Check if line is empty + if len(trimmed) == 0 { + return false + } + + // Check if line starts with "**" (typical completion message format) + if bytes.HasPrefix(trimmed, []byte("**")) { + return true + } + + // Check for JSON completion format: {"count":N,"finished":1} + if bytes.HasPrefix(trimmed, []byte("{")) && bytes.HasSuffix(trimmed, []byte("}")) { + // Quick check for both "count" and "finished" fields + if bytes.Contains(trimmed, []byte("\"count\"")) && + bytes.Contains(trimmed, []byte("\"finished\"")) { + return true + } + } + + // Check for common completion keywords + if bytes.Contains(trimmed, []byte("Processed")) && + (bytes.Contains(trimmed, []byte("entries")) || bytes.Contains(trimmed, []byte("done"))) { + return true + } + + return false +} diff --git a/receiver/macosunifiedloggingreceiver/receiver_test.go b/receiver/macosunifiedloggingreceiver/receiver_test.go new file mode 100644 index 0000000000000..7b146f6127949 --- /dev/null +++ b/receiver/macosunifiedloggingreceiver/receiver_test.go @@ -0,0 +1,627 @@ +// 
Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build darwin + +package macosunifiedloggingreceiver + +import ( + "context" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/pdata/plog" + "go.uber.org/zap" +) + +func setupFakeLogBinary(t *testing.T) { + t.Helper() + dir := t.TempDir() + scriptPath := filepath.Join(dir, "log") + script := `#!/bin/sh +set -e +if [ -n "$FAKE_LOG_CALLS_FILE" ]; then + cmd="" + if [ "$#" -gt 0 ]; then + cmd="$1" + shift + for arg in "$@" + do + cmd="$cmd $arg" + done + fi + printf "%s\n" "$cmd" >> "$FAKE_LOG_CALLS_FILE" +fi + +if [ -n "$FAKE_LOG_STREAM_LINE" ]; then + while true + do + printf "%s\n" "$FAKE_LOG_STREAM_LINE" + if [ -n "$FAKE_LOG_STREAM_DELAY" ]; then + sleep "$FAKE_LOG_STREAM_DELAY" + fi + done +fi + +if [ -n "$FAKE_LOG_OUTPUT_PATH" ] && [ -f "$FAKE_LOG_OUTPUT_PATH" ]; then + cat "$FAKE_LOG_OUTPUT_PATH" +fi +` + require.NoError(t, os.WriteFile(scriptPath, []byte(script), 0o755)) + t.Setenv("PATH", dir+":"+os.Getenv("PATH")) +} + +func writeFakeLogOutput(t *testing.T, lines ...string) string { + t.Helper() + path := filepath.Join(t.TempDir(), "log_output.txt") + content := strings.Join(lines, "\n") + "\n" + require.NoError(t, os.WriteFile(path, []byte(content), 0o644)) + return path +} + +func readRecordedCalls(t *testing.T, path string) []string { + t.Helper() + data, err := os.ReadFile(path) + require.NoError(t, err) + trimmed := strings.TrimSpace(string(data)) + if trimmed == "" { + return nil + } + return strings.Split(trimmed, "\n") +} + +func TestBuildLogCommandArgs(t *testing.T) { + t.Run("with ndjson format", func(t *testing.T) { + receiver := &unifiedLoggingReceiver{ + config: &Config{ + ArchivePath: "./testdata/system_logs.logarchive", + StartTime: "2024-01-01 00:00:00", + EndTime: "2024-01-02 00:00:00", + Predicate: "subsystem == 
'com.apple.systempreferences'", + Format: "ndjson", + }, + } + + args := receiver.buildLogCommandArgs("./testdata/system_logs.logarchive") + require.Contains(t, args, "--archive") + require.Contains(t, args, "./testdata/system_logs.logarchive") + require.Contains(t, args, "--start") + require.Contains(t, args, "2024-01-01 00:00:00") + require.Contains(t, args, "--end") + require.Contains(t, args, "2024-01-02 00:00:00") + require.Contains(t, args, "--predicate") + require.Contains(t, args, "subsystem == 'com.apple.systempreferences'") + require.Contains(t, args, "--style") + require.Contains(t, args, "ndjson") + }) + + t.Run("with default format", func(t *testing.T) { + receiver := &unifiedLoggingReceiver{ + config: &Config{ + ArchivePath: "./testdata/system_logs.logarchive", + StartTime: "2024-01-01 00:00:00", + Predicate: "subsystem == 'com.apple.systempreferences'", + Format: "default", + }, + } + + args := receiver.buildLogCommandArgs("./testdata/system_logs.logarchive") + require.Contains(t, args, "--archive") + require.Contains(t, args, "./testdata/system_logs.logarchive") + require.Contains(t, args, "--start") + require.Contains(t, args, "2024-01-01 00:00:00") + require.Contains(t, args, "--predicate") + require.Contains(t, args, "subsystem == 'com.apple.systempreferences'") + // Should NOT contain --style when format is default + require.NotContains(t, args, "--style") + }) + + t.Run("with json format", func(t *testing.T) { + receiver := &unifiedLoggingReceiver{ + config: &Config{ + Format: "json", + }, + } + + args := receiver.buildLogCommandArgs("") + require.Contains(t, args, "--style") + require.Contains(t, args, "json") + }) +} + +func TestProcessLogLine(t *testing.T) { + t.Run("default format - sends unparsed line", func(t *testing.T) { + sink := &consumertest.LogsSink{} + receiver := &unifiedLoggingReceiver{ + config: &Config{ + Format: "default", + }, + consumer: sink, + logger: zap.NewNop(), + } + + rawLine := []byte("2024-01-01 12:00:00.123456-0700 
localhost kernel[0]: (AppleACPIPlatform) AppleACPICPU: ProcessorId=0 LocalApicId=0 Enabled") + err := receiver.processLogLine(context.Background(), rawLine) + require.NoError(t, err) + + // Verify the log was consumed + require.Len(t, sink.AllLogs(), 1) + logs := sink.AllLogs()[0] + require.Equal(t, 1, logs.LogRecordCount()) + + // Verify the log record contains the raw line as string body + logRecord := logs.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) + require.Equal(t, string(rawLine), logRecord.Body().Str()) + + // In default format, timestamp should only be observed (not parsed) + require.NotZero(t, logRecord.ObservedTimestamp()) + require.Zero(t, logRecord.Timestamp()) + + // In default format, severity should not be set + require.Equal(t, "", logRecord.SeverityText()) + require.Equal(t, plog.SeverityNumberUnspecified, logRecord.SeverityNumber()) + }) + + t.Run("ndjson format - parses timestamp and severity", func(t *testing.T) { + sink := &consumertest.LogsSink{} + receiver := &unifiedLoggingReceiver{ + config: &Config{ + Format: "ndjson", + }, + consumer: sink, + logger: zap.NewNop(), + } + + jsonLine := []byte(`{"timestamp":"2024-01-01 12:00:00.123456-0700","eventMessage":"Test message","messageType":"Error","subsystem":"com.test"}`) + err := receiver.processLogLine(context.Background(), jsonLine) + require.NoError(t, err) + + // Verify the log was consumed + require.Len(t, sink.AllLogs(), 1) + logs := sink.AllLogs()[0] + require.Equal(t, 1, logs.LogRecordCount()) + + // Verify the log record contains the entire JSON as body + logRecord := logs.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) + require.Equal(t, string(jsonLine), logRecord.Body().Str()) + + // Verify timestamp was parsed from JSON + require.NotZero(t, logRecord.Timestamp()) + expectedTime, _ := time.Parse("2006-01-02 15:04:05.000000-0700", "2024-01-01 12:00:00.123456-0700") + require.Equal(t, expectedTime.UnixNano(), logRecord.Timestamp().AsTime().UnixNano()) + + // 
Verify severity was parsed from JSON + require.Equal(t, "Error", logRecord.SeverityText()) + require.Equal(t, plog.SeverityNumberError, logRecord.SeverityNumber()) + }) + + t.Run("ndjson format - handles invalid json gracefully", func(t *testing.T) { + sink := &consumertest.LogsSink{} + receiver := &unifiedLoggingReceiver{ + config: &Config{ + Format: "ndjson", + }, + consumer: sink, + logger: zap.NewNop(), + } + + invalidJSON := []byte(`{invalid json}`) + err := receiver.processLogLine(context.Background(), invalidJSON) + require.NoError(t, err) + + // Verify the log was still consumed (with just the body) + require.Len(t, sink.AllLogs(), 1) + logs := sink.AllLogs()[0] + require.Equal(t, 1, logs.LogRecordCount()) + + // Verify the log record contains the invalid JSON as body + logRecord := logs.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) + require.Equal(t, string(invalidJSON), logRecord.Body().Str()) + + // Timestamp should only be observed (not parsed from invalid JSON) + require.NotZero(t, logRecord.ObservedTimestamp()) + require.Zero(t, logRecord.Timestamp()) + }) + + t.Run("json format - parses timestamp and severity", func(t *testing.T) { + sink := &consumertest.LogsSink{} + receiver := &unifiedLoggingReceiver{ + config: &Config{ + Format: "json", + }, + consumer: sink, + logger: zap.NewNop(), + } + + jsonLine := []byte(`{"timestamp":"2024-01-01 12:00:00.123456-0700","eventMessage":"Test message","messageType":"Debug","subsystem":"com.test"}`) + err := receiver.processLogLine(context.Background(), jsonLine) + require.NoError(t, err) + + // Verify the log was consumed + require.Len(t, sink.AllLogs(), 1) + logs := sink.AllLogs()[0] + require.Equal(t, 1, logs.LogRecordCount()) + + // Verify the log record contains the entire JSON as body + logRecord := logs.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) + require.Equal(t, string(jsonLine), logRecord.Body().Str()) + + // Verify timestamp was parsed from JSON + require.NotZero(t, 
logRecord.Timestamp()) + + // Verify severity was parsed from JSON + require.Equal(t, "Debug", logRecord.SeverityText()) + require.Equal(t, plog.SeverityNumberDebug, logRecord.SeverityNumber()) + }) + + t.Run("ndjson format - handles json without timestamp or severity", func(t *testing.T) { + sink := &consumertest.LogsSink{} + receiver := &unifiedLoggingReceiver{ + config: &Config{ + Format: "ndjson", + }, + consumer: sink, + logger: zap.NewNop(), + } + + jsonLine := []byte(`{"eventMessage":"Test message","subsystem":"com.test"}`) + err := receiver.processLogLine(context.Background(), jsonLine) + require.NoError(t, err) + + // Verify the log was consumed + require.Len(t, sink.AllLogs(), 1) + logs := sink.AllLogs()[0] + require.Equal(t, 1, logs.LogRecordCount()) + + // Verify the log record contains the JSON as body + logRecord := logs.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) + require.Equal(t, string(jsonLine), logRecord.Body().Str()) + + // Timestamp should only be observed (no timestamp in JSON) + require.NotZero(t, logRecord.ObservedTimestamp()) + require.Zero(t, logRecord.Timestamp()) + + // Severity should not be set (no messageType in JSON) + require.Equal(t, "", logRecord.SeverityText()) + require.Equal(t, plog.SeverityNumberUnspecified, logRecord.SeverityNumber()) + }) +} + +func TestMapMessageTypeToSeverity(t *testing.T) { + tests := []struct { + name string + msgType string + expected plog.SeverityNumber + }{ + { + name: "Error message type", + msgType: "Error", + expected: plog.SeverityNumberError, + }, + { + name: "Fault message type", + msgType: "Fault", + expected: plog.SeverityNumberFatal, + }, + { + name: "Default message type", + msgType: "Default", + expected: plog.SeverityNumberInfo, + }, + { + name: "Info message type", + msgType: "Info", + expected: plog.SeverityNumberInfo, + }, + { + name: "Debug message type", + msgType: "Debug", + expected: plog.SeverityNumberDebug, + }, + { + name: "Unknown message type", + msgType: 
"Unknown", + expected: plog.SeverityNumberUnspecified, + }, + { + name: "Empty message type", + msgType: "", + expected: plog.SeverityNumberUnspecified, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := mapMessageTypeToSeverity(tt.msgType) + if result != tt.expected { + t.Errorf("mapMessageTypeToSeverity(%q) = %v, want %v", tt.msgType, result, tt.expected) + } + }) + } +} + +func TestIsCompletionLine(t *testing.T) { + tests := []struct { + name string + line string + expected bool + }{ + { + name: "JSON completion format", + line: `{"count":540659,"finished":1}`, + expected: true, + }, + { + name: "JSON completion format with whitespace", + line: ` {"count":100,"finished":1} `, + expected: true, + }, + { + name: "completion line with asterisks", + line: "** Processed 574 entries, done. **", + expected: true, + }, + { + name: "completion line with whitespace", + line: " ** Finished processing ** ", + expected: true, + }, + { + name: "completion line with Processed and entries", + line: "Processed 100 entries successfully", + expected: true, + }, + { + name: "completion line with Processed and done", + line: "Processed all logs, done", + expected: true, + }, + { + name: "normal log line", + line: "2024-01-01 12:00:00.123456-0700 localhost kernel[0]: System initialized", + expected: false, + }, + { + name: "log line containing Processed word only", + line: "2024-01-01 12:00:00.123456-0700 localhost app[123]: Processed user request", + expected: false, + }, + { + name: "JSON without count and finished", + line: `{"timestamp":"2024-01-01","message":"test"}`, + expected: false, + }, + { + name: "empty line", + line: "", + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := isCompletionLine([]byte(tt.line)) + require.Equal(t, tt.expected, result) + }) + } +} + +func TestReadFromMultipleArchives(t *testing.T) { + // Create test archive directories + testdataDir := 
filepath.Join(".", "testdata", "multi_archive_test") + archive1 := filepath.Join(testdataDir, "archive1.logarchive") + archive2 := filepath.Join(testdataDir, "archive2.logarchive") + archive3 := filepath.Join(testdataDir, "logs", "archive3.logarchive") + + _ = os.MkdirAll(archive1, 0o755) + _ = os.MkdirAll(archive2, 0o755) + _ = os.MkdirAll(archive3, 0o755) + + defer func() { + _ = os.RemoveAll(testdataDir) + }() + + t.Run("receiver processes multiple archives with glob pattern", func(t *testing.T) { + // Create config with glob pattern + cfg := &Config{ + ArchivePath: filepath.Join(testdataDir, "*.logarchive"), + Format: "ndjson", + } + + // Validate config to resolve glob paths + err := cfg.Validate() + require.NoError(t, err) + + // Verify that multiple archives were resolved + resolvedPaths := cfg.resolvedArchivePaths + require.Len(t, resolvedPaths, 2, "Should resolve 2 archives matching *.logarchive pattern") + require.Contains(t, resolvedPaths, archive1) + require.Contains(t, resolvedPaths, archive2) + require.NotContains(t, resolvedPaths, archive3, "Should not include archive3 (in subdirectory)") + + // Create receiver + sink := &consumertest.LogsSink{} + receiver := newUnifiedLoggingReceiver(cfg, zap.NewNop(), sink) + require.NotNil(t, receiver) + require.Equal(t, 2, len(receiver.config.resolvedArchivePaths)) + }) + + t.Run("receiver processes multiple archives with doublestar glob pattern", func(t *testing.T) { + // Create config with doublestar glob pattern + cfg := &Config{ + ArchivePath: filepath.Join(testdataDir, "**", "*.logarchive"), + Format: "ndjson", + } + + // Validate config to resolve glob paths + err := cfg.Validate() + require.NoError(t, err) + + // Verify that all archives were resolved (including subdirectories) + resolvedPaths := cfg.resolvedArchivePaths + require.Len(t, resolvedPaths, 3, "Should resolve all 3 archives with ** pattern") + require.Contains(t, resolvedPaths, archive1) + require.Contains(t, resolvedPaths, archive2) + 
require.Contains(t, resolvedPaths, archive3) + + // Create receiver + sink := &consumertest.LogsSink{} + receiver := newUnifiedLoggingReceiver(cfg, zap.NewNop(), sink) + require.NotNil(t, receiver) + require.Equal(t, 3, len(receiver.config.resolvedArchivePaths)) + }) + + t.Run("receiver handles single archive path", func(t *testing.T) { + // Create config with direct path (no glob) + cfg := &Config{ + ArchivePath: archive1, + Format: "ndjson", + } + + // Validate config + err := cfg.Validate() + require.NoError(t, err) + + // Verify that single archive was resolved + resolvedPaths := cfg.resolvedArchivePaths + require.Len(t, resolvedPaths, 1, "Should resolve to single archive") + require.Contains(t, resolvedPaths, archive1) + + // Create receiver + sink := &consumertest.LogsSink{} + receiver := newUnifiedLoggingReceiver(cfg, zap.NewNop(), sink) + require.NotNil(t, receiver) + require.Equal(t, 1, len(receiver.config.resolvedArchivePaths)) + }) +} + +func TestRunLogCommandSkipsHeaderAndCompletionLines(t *testing.T) { + setupFakeLogBinary(t) + outputPath := writeFakeLogOutput(t, + "Timestamp Thread Type Activity PID", + "** Processed 10 entries, done. 
**", + "2024-01-01 12:00:00.000000-0700 localhost app[123]: Final log line", + ) + t.Setenv("FAKE_LOG_OUTPUT_PATH", outputPath) + + sink := &consumertest.LogsSink{} + receiver := newUnifiedLoggingReceiver(&Config{Format: "default"}, zap.NewNop(), sink) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + count, err := receiver.runLogCommand(ctx, "") + require.NoError(t, err) + require.Equal(t, 1, count) + + allLogs := sink.AllLogs() + require.Len(t, allLogs, 1) + logRecord := allLogs[0].ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) + require.Equal(t, "2024-01-01 12:00:00.000000-0700 localhost app[123]: Final log line", logRecord.Body().Str()) +} + +// func TestRunLogCommandRespectsContextCancellation(t *testing.T) { +// setupFakeLogBinary(t) +// t.Setenv("FAKE_LOG_STREAM_LINE", `{"timestamp":"2024-01-01 12:00:00.000000-0700","eventMessage":"Test","messageType":"Info"}`) +// t.Setenv("FAKE_LOG_STREAM_DELAY", "0.01") + +// sink := &consumertest.LogsSink{} +// receiver := newUnifiedLoggingReceiver(&Config{Format: "ndjson"}, zap.NewNop(), sink) + +// ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) +// defer cancel() + +// count, err := receiver.runLogCommand(ctx, "") +// require.Error(t, err) +// require.ErrorIs(t, err, context.DeadlineExceeded) +// require.Greater(t, count, 0) +// } + +func TestReadFromArchiveProcessesAllResolvedPaths(t *testing.T) { + setupFakeLogBinary(t) + outputPath := writeFakeLogOutput(t, `{"timestamp":"2024-01-01 12:00:00.000000-0700","eventMessage":"Archive","messageType":"Info"}`) + t.Setenv("FAKE_LOG_OUTPUT_PATH", outputPath) + + callsFile := filepath.Join(t.TempDir(), "calls.txt") + t.Setenv("FAKE_LOG_CALLS_FILE", callsFile) + + archiveRoot := t.TempDir() + archiveOne := filepath.Join(archiveRoot, "one.logarchive") + archiveTwo := filepath.Join(archiveRoot, "two.logarchive") + require.NoError(t, os.MkdirAll(archiveOne, 0o755)) + require.NoError(t, 
os.MkdirAll(archiveTwo, 0o755)) + + cfg := &Config{ + Format: "ndjson", + } + cfg.resolvedArchivePaths = []string{archiveOne, archiveTwo} + + sink := &consumertest.LogsSink{} + receiver := newUnifiedLoggingReceiver(cfg, zap.NewNop(), sink) + + receiver.readFromArchive(context.Background()) + + calls := readRecordedCalls(t, callsFile) + require.Len(t, calls, 2) + + callSet := map[string]bool{} + for _, call := range calls { + if strings.Contains(call, archiveOne) { + callSet[archiveOne] = true + } + if strings.Contains(call, archiveTwo) { + callSet[archiveTwo] = true + } + } + require.True(t, callSet[archiveOne]) + require.True(t, callSet[archiveTwo]) + + allLogs := sink.AllLogs() + require.Len(t, allLogs, 2) +} + +func TestReadFromLiveUsesBackoffLoop(t *testing.T) { + setupFakeLogBinary(t) + outputPath := writeFakeLogOutput(t, `{"timestamp":"2024-01-01 12:00:00.000000-0700","eventMessage":"Live","messageType":"Info"}`) + t.Setenv("FAKE_LOG_OUTPUT_PATH", outputPath) + + callsFile := filepath.Join(t.TempDir(), "live_calls.txt") + t.Setenv("FAKE_LOG_CALLS_FILE", callsFile) + + cfg := &Config{ + Format: "ndjson", + MaxPollInterval: 150 * time.Millisecond, + } + + sink := &consumertest.LogsSink{} + receiver := newUnifiedLoggingReceiver(cfg, zap.NewNop(), sink) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + done := make(chan struct{}) + go func() { + receiver.readFromLive(ctx) + close(done) + }() + + time.Sleep(250 * time.Millisecond) + cancel() + + select { + case <-done: + case <-time.After(time.Second): + t.Fatal("readFromLive did not stop after cancellation") + } + + calls := readRecordedCalls(t, callsFile) + require.GreaterOrEqual(t, len(calls), 2, "expected initial run plus at least one ticker iteration") + + allLogs := sink.AllLogs() + require.GreaterOrEqual(t, len(allLogs), 2) +} From 9df7f9e0b2c45c0f422d9ddf03aa90e2116f0f82 Mon Sep 17 00:00:00 2001 From: Alejandro Morera <60617457+MoreraAlejandro@users.noreply.github.com> Date: 
Thu, 27 Nov 2025 07:44:40 +0000 Subject: [PATCH 19/41] [exporter/prometheus] Expose native histograms (#43053) #### Description #### Link to tracking issue Closes #33703 #### Testing #### Documentation --- ...us-exporter-support-native-histograms.yaml | 27 ++ exporter/prometheusexporter/accumulator.go | 250 ++++++++++++ .../prometheusexporter/accumulator_test.go | 378 ++++++++++++++++++ exporter/prometheusexporter/collector.go | 90 +++++ exporter/prometheusexporter/collector_test.go | 149 +++++++ 5 files changed, 894 insertions(+) create mode 100644 .chloggen/feature_33703-prometheus-exporter-support-native-histograms.yaml diff --git a/.chloggen/feature_33703-prometheus-exporter-support-native-histograms.yaml b/.chloggen/feature_33703-prometheus-exporter-support-native-histograms.yaml new file mode 100644 index 0000000000000..21e10a208bf25 --- /dev/null +++ b/.chloggen/feature_33703-prometheus-exporter-support-native-histograms.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: exporter/prometheus + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add support for exponential histograms + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [33703] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. 
+# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/exporter/prometheusexporter/accumulator.go b/exporter/prometheusexporter/accumulator.go index bc3508af2034f..b9edf9202a8b0 100644 --- a/exporter/prometheusexporter/accumulator.go +++ b/exporter/prometheusexporter/accumulator.go @@ -5,6 +5,7 @@ package prometheusexporter // import "github.com/open-telemetry/opentelemetry-co import ( "fmt" + "math" "sort" "strings" "sync" @@ -90,6 +91,8 @@ func (a *lastValueAccumulator) addMetric(metric pmetric.Metric, scopeName, scope return a.accumulateHistogram(metric, scopeName, scopeVersion, scopeSchemaURL, scopeAttributes, resourceAttrs, now) case pmetric.MetricTypeSummary: return a.accumulateSummary(metric, scopeName, scopeVersion, scopeSchemaURL, scopeAttributes, resourceAttrs, now) + case pmetric.MetricTypeExponentialHistogram: + return a.accumulateExponentialHistogram(metric, scopeName, scopeVersion, scopeSchemaURL, scopeAttributes, resourceAttrs, now) default: a.logger.With( zap.String("data_type", metric.Type().String()), @@ -297,6 +300,77 @@ func (a *lastValueAccumulator) accumulateHistogram(metric pmetric.Metric, scopeN return n } +func (a *lastValueAccumulator) accumulateExponentialHistogram(metric pmetric.Metric, scopeName, scopeVersion, scopeSchemaURL string, scopeAttributes, resourceAttrs pcommon.Map, now time.Time) (n int) { + expHistogram := metric.ExponentialHistogram() + a.logger.Debug("Accumulate native histogram.....") + dps := expHistogram.DataPoints() + + for i := 0; i < dps.Len(); i++ { + ip := dps.At(i) + signature := timeseriesSignature(scopeName, scopeVersion, scopeSchemaURL, scopeAttributes, metric, ip.Attributes(), resourceAttrs) // uniquely identify this time series you are accumulating for + if ip.Flags().NoRecordedValue() { + 
a.registeredMetrics.Delete(signature) + return 0 + } + + v, ok := a.registeredMetrics.Load(signature) // a accumulates metric values for all times series. Get value for particular time series + if !ok { + // first data point + m := copyMetricMetadata(metric) + ip.CopyTo(m.SetEmptyExponentialHistogram().DataPoints().AppendEmpty()) + m.ExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + a.registeredMetrics.Store(signature, &accumulatedValue{value: m, resourceAttrs: resourceAttrs, scopeName: scopeName, scopeVersion: scopeVersion, scopeSchemaURL: scopeSchemaURL, scopeAttributes: scopeAttributes, updated: now}) + n++ + continue + } + mv := v.(*accumulatedValue) + + m := copyMetricMetadata(metric) + m.SetEmptyExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + + switch expHistogram.AggregationTemporality() { + case pmetric.AggregationTemporalityDelta: + pp := mv.value.ExponentialHistogram().DataPoints().At(0) // previous aggregated value for time range + if ip.StartTimestamp().AsTime() != pp.Timestamp().AsTime() { + // treat misalignment as restart and reset, or violation of single-writer principle and drop + a.logger.With( + zap.String("ip_start_time", ip.StartTimestamp().String()), + zap.String("pp_start_time", pp.StartTimestamp().String()), + zap.String("pp_timestamp", pp.Timestamp().String()), + zap.String("ip_timestamp", ip.Timestamp().String()), + ).Warn("Misaligned starting timestamps") + if !ip.StartTimestamp().AsTime().After(pp.Timestamp().AsTime()) { + a.logger.With( + zap.String("metric_name", metric.Name()), + ).Warn("Dropped misaligned histogram datapoint") + continue + } + a.logger.Debug("Treating it like reset") + ip.CopyTo(m.ExponentialHistogram().DataPoints().AppendEmpty()) + } else { + a.logger.Debug("Accumulate another histogram datapoint") + accumulateExponentialHistogramValues(pp, ip, m.ExponentialHistogram().DataPoints().AppendEmpty()) + } + case 
pmetric.AggregationTemporalityCumulative: + if ip.Timestamp().AsTime().Before(mv.value.ExponentialHistogram().DataPoints().At(0).Timestamp().AsTime()) { + // only keep datapoint with latest timestamp + continue + } + + ip.CopyTo(m.ExponentialHistogram().DataPoints().AppendEmpty()) + default: + // unsupported temporality + continue + } + + // Store the updated metric and advance count + a.registeredMetrics.Store(signature, &accumulatedValue{value: m, resourceAttrs: resourceAttrs, scopeName: scopeName, scopeVersion: scopeVersion, scopeSchemaURL: scopeSchemaURL, scopeAttributes: scopeAttributes, updated: now}) + n++ + } + + return n +} + // Collect returns a slice with relevant aggregated metrics and their resource attributes. func (a *lastValueAccumulator) Collect() ([]pmetric.Metric, []pcommon.Map, []string, []string, []string, []pcommon.Map) { a.logger.Debug("Accumulator collect called") @@ -406,3 +480,179 @@ func accumulateHistogramValues(prev, current, dest pmetric.HistogramDataPoint) { dest.ExplicitBounds().FromRaw(newer.ExplicitBounds().AsRaw()) } + +// calculateBucketUpperBound calculates the upper bound for an exponential histogram bucket +func calculateBucketUpperBound(scale, offset int32, index int) float64 { + // For exponential histograms with base = 2: + // Upper bound = 2^(scale + offset + index + 1) + return math.Pow(2, float64(scale+offset+int32(index)+1)) +} + +// filterBucketsForZeroThreshold filters buckets that fall below the zero threshold +// and returns the filtered buckets and the additional zero count +func filterBucketsForZeroThreshold(offset int32, counts []uint64, scale int32, zeroThreshold float64) (newOffset int32, filteredCounts []uint64, additionalZeroCount uint64) { + if len(counts) == 0 || zeroThreshold <= 0 { + return offset, counts, 0 + } + + additionalZeroCount = uint64(0) + filteredCounts = make([]uint64, 0, len(counts)) + newOffset = offset + + // Find the first bucket whose upper bound is > zeroThreshold + for i, count := range 
counts { + upperBound := calculateBucketUpperBound(scale, offset, i) + if upperBound > zeroThreshold { + filteredCounts = append(filteredCounts, counts[i:]...) + break + } + // This bucket's range falls entirely below the zero threshold + additionalZeroCount += count + newOffset = offset + int32(i) + 1 // Move offset to next bucket + } + + // If all buckets were filtered out, return empty buckets + if len(filteredCounts) == 0 { + return 0, nil, additionalZeroCount + } + + return newOffset, filteredCounts, additionalZeroCount +} + +func accumulateExponentialHistogramValues(prev, current, dest pmetric.ExponentialHistogramDataPoint) { + if current.Timestamp().AsTime().Before(prev.Timestamp().AsTime()) { + dest.SetStartTimestamp(current.StartTimestamp()) + prev.Attributes().CopyTo(dest.Attributes()) + dest.SetTimestamp(prev.Timestamp()) + } else { + dest.SetStartTimestamp(prev.StartTimestamp()) + current.Attributes().CopyTo(dest.Attributes()) + dest.SetTimestamp(current.Timestamp()) + } + + targetScale := min(current.Scale(), prev.Scale()) + dest.SetScale(targetScale) + + // Determine the new zero threshold (maximum of the two) + newZeroThreshold := max(prev.ZeroThreshold(), current.ZeroThreshold()) + dest.SetZeroThreshold(newZeroThreshold) + + // Downscale buckets to target scale + pPosOff, pPosCounts := downscaleBucketSide(prev.Positive().Offset(), prev.Positive().BucketCounts().AsRaw(), prev.Scale(), targetScale) + pNegOff, pNegCounts := downscaleBucketSide(prev.Negative().Offset(), prev.Negative().BucketCounts().AsRaw(), prev.Scale(), targetScale) + cPosOff, cPosCounts := downscaleBucketSide(current.Positive().Offset(), current.Positive().BucketCounts().AsRaw(), current.Scale(), targetScale) + cNegOff, cNegCounts := downscaleBucketSide(current.Negative().Offset(), current.Negative().BucketCounts().AsRaw(), current.Scale(), targetScale) + + // Filter buckets that fall below the new zero threshold + additionalZeroCount := uint64(0) + + // Filter positive buckets from 
previous histogram + if newZeroThreshold > prev.ZeroThreshold() { + var filteredZeroCount uint64 + pPosOff, pPosCounts, filteredZeroCount = filterBucketsForZeroThreshold(pPosOff, pPosCounts, targetScale, newZeroThreshold) + additionalZeroCount += filteredZeroCount + } + + // Filter positive buckets from current histogram + if newZeroThreshold > current.ZeroThreshold() { + var filteredZeroCount uint64 + cPosOff, cPosCounts, filteredZeroCount = filterBucketsForZeroThreshold(cPosOff, cPosCounts, targetScale, newZeroThreshold) + additionalZeroCount += filteredZeroCount + } + + // Merge the remaining buckets + mPosOff, mPosCounts := mergeBuckets(pPosOff, pPosCounts, cPosOff, cPosCounts) + mNegOff, mNegCounts := mergeBuckets(pNegOff, pNegCounts, cNegOff, cNegCounts) + + dest.Positive().SetOffset(mPosOff) + dest.Positive().BucketCounts().FromRaw(mPosCounts) + dest.Negative().SetOffset(mNegOff) + dest.Negative().BucketCounts().FromRaw(mNegCounts) + + // Set zero count including additional counts from filtered buckets + dest.SetZeroCount(prev.ZeroCount() + current.ZeroCount() + additionalZeroCount) + dest.SetCount(prev.Count() + current.Count()) + + if prev.HasSum() && current.HasSum() { + dest.SetSum(prev.Sum() + current.Sum()) + } + + switch { + case prev.HasMin() && current.HasMin(): + dest.SetMin(min(prev.Min(), current.Min())) + case prev.HasMin(): + dest.SetMin(prev.Min()) + case current.HasMin(): + dest.SetMin(current.Min()) + } + + switch { + case prev.HasMax() && current.HasMax(): + dest.SetMax(max(prev.Max(), current.Max())) + case prev.HasMax(): + dest.SetMax(prev.Max()) + case current.HasMax(): + dest.SetMax(current.Max()) + } +} + +func downscaleBucketSide(offset int32, counts []uint64, fromScale, targetScale int32) (int32, []uint64) { + if len(counts) == 0 || fromScale <= targetScale { + return offset, counts + } + shift := fromScale - targetScale + factor := int32(1) << shift + + first := offset + last := offset + int32(len(counts)) - 1 + newOffset := 
floorDivInt32(first, factor) + newLast := floorDivInt32(last, factor) + newLen := int(newLast - newOffset + 1) + for i := range counts { + k := offset + int32(i) + nk := floorDivInt32(k, factor) + if k%factor == 0 { + counts[nk-newOffset] = counts[i] + } else { + counts[nk-newOffset] += counts[i] + } + } + return newOffset, counts[:newLen] +} + +func mergeBuckets(offsetA int32, countsA []uint64, offsetB int32, countsB []uint64) (int32, []uint64) { + if len(countsA) == 0 && len(countsB) == 0 { + return 0, nil + } + if len(countsA) == 0 { + return offsetB, countsB + } + if len(countsB) == 0 { + return offsetA, countsA + } + minOffset := min(offsetB, offsetA) + lastA := offsetA + int32(len(countsA)) - 1 + lastB := offsetB + int32(len(countsB)) - 1 + maxLast := max(lastB, lastA) + newBucketLen := int(maxLast - minOffset + 1) + newBucketCount := make([]uint64, newBucketLen) + for i := range countsA { + k := offsetA + int32(i) + newBucketCount[k-minOffset] += countsA[i] + } + for i := range countsB { + k := offsetB + int32(i) + newBucketCount[k-minOffset] += countsB[i] + } + return minOffset, newBucketCount +} + +func floorDivInt32(a, b int32) int32 { + if b <= 0 { + return 0 + } + if a >= 0 { + return a / b + } + return -(((-a) + b - 1) / b) +} diff --git a/exporter/prometheusexporter/accumulator_test.go b/exporter/prometheusexporter/accumulator_test.go index 0e87a2c1d90bb..ce278f7cdfcbc 100644 --- a/exporter/prometheusexporter/accumulator_test.go +++ b/exporter/prometheusexporter/accumulator_test.go @@ -643,6 +643,384 @@ func TestAccumulateDroppedMetrics(t *testing.T) { } } +func TestAccumulateDeltaToCumulativeExponentialHistogram(t *testing.T) { + appendDeltaNative := func(startTs, ts time.Time, scale, posOff int32, pos []uint64, negOff int32, neg []uint64, + zeroCount, count uint64, sum float64, minSet bool, minim float64, maxSet bool, maxim float64, metrics pmetric.MetricSlice, + ) pmetric.Metric { + metric := metrics.AppendEmpty() + 
metric.SetName("test_native_hist") + metric.SetEmptyExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + dp := metric.ExponentialHistogram().DataPoints().AppendEmpty() + dp.SetScale(scale) + dp.Positive().SetOffset(posOff) + dp.Positive().BucketCounts().FromRaw(pos) + dp.Negative().SetOffset(negOff) + dp.Negative().BucketCounts().FromRaw(neg) + dp.SetZeroCount(zeroCount) + dp.SetCount(count) + dp.SetZeroThreshold(0) + if minSet { + dp.SetMin(minim) + } + if maxSet { + dp.SetMax(maxim) + } + dp.SetSum(sum) + dp.Attributes().PutStr("label_1", "1") + dp.Attributes().PutStr("label_2", "2") + dp.SetStartTimestamp(pcommon.NewTimestampFromTime(startTs)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) + return metric + } + + t.Run("AccumulateAlignedMergesAndDownscales", func(t *testing.T) { + // Two aligned deltas with different scales; verify downscale to min scale and merge. + startTs := time.Now().Add(-6 * time.Second) + ts1 := time.Now().Add(-5 * time.Second) + ts2 := time.Now().Add(-4 * time.Second) + + rm := pmetric.NewResourceMetrics() + ilm := rm.ScopeMetrics().AppendEmpty() + ilm.Scope().SetName("test") + + // First delta: scale=2, positive offset -2 counts [1,2,3]; negative offset 0 counts [4] + m1 := appendDeltaNative(startTs, ts1, 2, -2, []uint64{1, 2, 3}, 0, []uint64{4}, 1, 6, 3.5, true, 0.9, true, 9.0, ilm.Metrics()) + _ = m1 + // Second delta: scale=1, positive offset -1 counts [4,5]; negative offset 0 counts [1,1] + m2 := appendDeltaNative(ts1, ts2, 1, -1, []uint64{4, 5}, 0, []uint64{1, 1}, 2, 7, 4.5, true, 0.5, false, 0, ilm.Metrics()) + + a := newAccumulator(zap.NewNop(), 1*time.Hour).(*lastValueAccumulator) + n := a.Accumulate(rm) + require.Equal(t, 2, n) + + // Build signature using attributes of second metric + sig := timeseriesSignature(ilm.Scope().Name(), ilm.Scope().Version(), ilm.SchemaUrl(), ilm.Scope().Attributes(), ilm.Metrics().At(0), m2.ExponentialHistogram().DataPoints().At(0).Attributes(), 
pcommon.NewMap()) + got, ok := a.registeredMetrics.Load(sig) + require.True(t, ok) + dp := got.(*accumulatedValue).value.ExponentialHistogram().DataPoints().At(0) + + // Expect scale = min(2,1) = 1 + require.Equal(t, int32(1), dp.Scale()) + + // Positive buckets: prev downscales by factor 2 -> offset -1, counts [-2,-1,0] => [1+2, 3] => [3,3]; merge with current [-1:4,0:5] => [-1:7, 0:8] + require.Equal(t, int32(-1), dp.Positive().Offset()) + require.Equal(t, 2, dp.Positive().BucketCounts().Len()) + require.Equal(t, uint64(7), dp.Positive().BucketCounts().At(0)) + require.Equal(t, uint64(8), dp.Positive().BucketCounts().At(1)) + + // Negative buckets: prev scale=2 offset 0 counts [4] downscale to scale 1 with factor 2 -> offset 0, counts [4]; merge with current offset 0 counts [1,1] -> [5,1] + require.Equal(t, int32(0), dp.Negative().Offset()) + require.Equal(t, 2, dp.Negative().BucketCounts().Len()) + require.Equal(t, uint64(5), dp.Negative().BucketCounts().At(0)) + require.Equal(t, uint64(1), dp.Negative().BucketCounts().At(1)) + + // Count, ZeroCount, Sum, Min, Max + require.Equal(t, uint64(6+7), dp.Count()) + require.Equal(t, uint64(1+2), dp.ZeroCount()) + require.InDelta(t, 3.5+4.5, dp.Sum(), 1e-12) + require.True(t, dp.HasMin()) + require.InDelta(t, 0.5, dp.Min(), 1e-12) // min of 0.9 and 0.5 + require.True(t, dp.HasMax()) + require.InDelta(t, 9.0, dp.Max(), 1e-12) // max from first since second had no max + require.Equal(t, ts2.Unix(), dp.Timestamp().AsTime().Unix()) + }) + + t.Run("CumulativeKeepLatest", func(t *testing.T) { + rm := pmetric.NewResourceMetrics() + ilm := rm.ScopeMetrics().AppendEmpty() + ilm.Scope().SetName("test") + + metric := ilm.Metrics().AppendEmpty() + metric.SetName("test_native_hist") + metric.SetEmptyExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + dp1 := metric.ExponentialHistogram().DataPoints().AppendEmpty() + dp1.SetScale(1) + dp1.Positive().SetOffset(0) + 
dp1.Positive().BucketCounts().FromRaw([]uint64{1, 2}) + dp1.Negative().SetOffset(0) + dp1.Negative().BucketCounts().FromRaw([]uint64{0}) + dp1.SetCount(3) + dp1.SetZeroCount(0) + dp1.SetSum(10) + dp1.SetTimestamp(pcommon.NewTimestampFromTime(time.Now().Add(-3 * time.Second))) + dp1.Attributes().PutStr("label_1", "1") + + dp2 := metric.ExponentialHistogram().DataPoints().AppendEmpty() + dp2.SetScale(1) + dp2.Positive().SetOffset(0) + dp2.Positive().BucketCounts().FromRaw([]uint64{4, 5}) + dp2.Negative().SetOffset(0) + dp2.Negative().BucketCounts().FromRaw([]uint64{0}) + dp2.SetCount(9) + dp2.SetZeroCount(0) + dp2.SetSum(20) + now := time.Now().Add(-2 * time.Second) + dp2.SetTimestamp(pcommon.NewTimestampFromTime(now)) + dp2.Attributes().PutStr("label_1", "1") + + a := newAccumulator(zap.NewNop(), 1*time.Hour).(*lastValueAccumulator) + n := a.Accumulate(rm) + require.Equal(t, 2, n) + + sig := timeseriesSignature(ilm.Scope().Name(), ilm.Scope().Version(), ilm.SchemaUrl(), ilm.Scope().Attributes(), metric, dp2.Attributes(), pcommon.NewMap()) + got, ok := a.registeredMetrics.Load(sig) + require.True(t, ok) + dp := got.(*accumulatedValue).value.ExponentialHistogram().DataPoints().At(0) + require.Equal(t, uint64(9), dp.Count()) + require.Equal(t, uint64(0), dp.ZeroCount()) + require.InDelta(t, 20.0, dp.Sum(), 1e-12) + require.Equal(t, now.Unix(), dp.Timestamp().AsTime().Unix()) + require.Equal(t, uint64(4), dp.Positive().BucketCounts().At(0)) + require.Equal(t, uint64(5), dp.Positive().BucketCounts().At(1)) + }) + + t.Run("DeltaMisalignedDropOrReset", func(t *testing.T) { + // First: ts1; Second: start before prev end -> drop; Third: start after prev end -> reset + start1 := time.Now().Add(-6 * time.Second) + ts1 := time.Now().Add(-5 * time.Second) + start2 := time.Now().Add(-6 * time.Second) // not equal to prev end; and NOT after prev end -> drop + ts2 := time.Now().Add(-4 * time.Second) + start3 := time.Now().Add(-3 * time.Second) // strictly after prev end -> reset + 
// TestAccumulateExponentialHistogramZeroThresholds verifies how the
// last-value accumulator merges delta exponential-histogram datapoints whose
// zero thresholds differ: the merged point must adopt the maximum threshold,
// and positive buckets that fall entirely below that threshold must be folded
// into the zero count.
func TestAccumulateExponentialHistogramZeroThresholds(t *testing.T) {
	// appendDeltaNativeWithZeroThreshold appends a delta exponential-histogram
	// metric with one datapoint built from the given scale, bucket layout,
	// counts, sum and zero threshold, and returns the appended metric.
	appendDeltaNativeWithZeroThreshold := func(startTs, ts time.Time, scale, posOff int32, pos []uint64, negOff int32, neg []uint64,
		zeroCount, count uint64, sum, zeroThreshold float64, metrics pmetric.MetricSlice,
	) pmetric.Metric {
		metric := metrics.AppendEmpty()
		metric.SetName("test_native_hist_zero_threshold")
		metric.SetEmptyExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityDelta)
		dp := metric.ExponentialHistogram().DataPoints().AppendEmpty()
		dp.SetScale(scale)
		dp.Positive().SetOffset(posOff)
		dp.Positive().BucketCounts().FromRaw(pos)
		dp.Negative().SetOffset(negOff)
		dp.Negative().BucketCounts().FromRaw(neg)
		dp.SetZeroCount(zeroCount)
		dp.SetCount(count)
		dp.SetZeroThreshold(zeroThreshold)
		dp.SetSum(sum)
		dp.Attributes().PutStr("label_1", "1")
		dp.Attributes().PutStr("label_2", "2")
		dp.SetStartTimestamp(pcommon.NewTimestampFromTime(startTs))
		dp.SetTimestamp(pcommon.NewTimestampFromTime(ts))
		return metric
	}

	t.Run("MergeWithHigherZeroThreshold", func(t *testing.T) {
		// Test merging two histograms where one has a higher zero threshold
		// Some buckets from the histogram with lower threshold should be moved to zero count
		startTs := time.Now().Add(-6 * time.Second)
		ts1 := time.Now().Add(-5 * time.Second)
		ts2 := time.Now().Add(-4 * time.Second)

		rm := pmetric.NewResourceMetrics()
		ilm := rm.ScopeMetrics().AppendEmpty()
		ilm.Scope().SetName("test")

		// First histogram: scale=0, offset=0, zeroThreshold=0.5, buckets representing ranges [1,2), [2,4), [4,8)
		// Bucket 0: [2^0, 2^1) = [1,2), count=2
		// Bucket 1: [2^1, 2^2) = [2,4), count=3
		// Bucket 2: [2^2, 2^3) = [4,8), count=1
		appendDeltaNativeWithZeroThreshold(startTs, ts1, 0, 0, []uint64{2, 3, 1}, 0, nil, 1, 7, 15.0, 0.5, ilm.Metrics())

		// Second histogram: scale=0, offset=0, zeroThreshold=3.0, buckets representing same ranges
		// With new zero threshold of 3.0, bucket 0 [1,2) should be moved to zero count
		// Only buckets [2,4) and [4,8) should remain
		m2 := appendDeltaNativeWithZeroThreshold(ts1, ts2, 0, 0, []uint64{1, 2, 4}, 0, nil, 2, 9, 20.0, 3.0, ilm.Metrics())

		a := newAccumulator(zap.NewNop(), 1*time.Hour).(*lastValueAccumulator)
		n := a.Accumulate(rm)
		require.Equal(t, 2, n)

		sig := timeseriesSignature(ilm.Scope().Name(), ilm.Scope().Version(), ilm.SchemaUrl(), ilm.Scope().Attributes(), ilm.Metrics().At(0), m2.ExponentialHistogram().DataPoints().At(0).Attributes(), pcommon.NewMap())
		got, ok := a.registeredMetrics.Load(sig)
		require.True(t, ok)
		dp := got.(*accumulatedValue).value.ExponentialHistogram().DataPoints().At(0)

		// Expect zero threshold to be the maximum (3.0)
		require.InDelta(t, 3.0, dp.ZeroThreshold(), 1e-12)

		// Zero count should include original zero counts plus bucket 0 from first histogram
		// Original zero counts: 1 + 2 = 3, plus bucket 0 from first histogram: 2
		require.Equal(t, uint64(3+2), dp.ZeroCount())

		// After filtering first histogram: offset=1, counts=[3,1] (bucket 0 removed)
		// Second histogram: offset=0, counts=[1,2,4]
		// Merged result: offset=0, counts=[1, 3+2, 1+4] = [1, 5, 5]
		require.Equal(t, int32(0), dp.Positive().Offset())
		require.Equal(t, 3, dp.Positive().BucketCounts().Len())
		require.Equal(t, uint64(1), dp.Positive().BucketCounts().At(0)) // bucket 0: [1,2) from second histogram
		require.Equal(t, uint64(5), dp.Positive().BucketCounts().At(1)) // bucket 1: [2,4) = 3+2
		require.Equal(t, uint64(5), dp.Positive().BucketCounts().At(2)) // bucket 2: [4,8) = 1+4

		// Total count should remain the same
		require.Equal(t, uint64(16), dp.Count())

		// Sum should be sum of both histograms
		require.InDelta(t, 35.0, dp.Sum(), 1e-12)
	})

	t.Run("MergeSameZeroThreshold", func(t *testing.T) {
		// Test merging two histograms with the same zero threshold
		// No buckets should be moved to zero count
		startTs := time.Now().Add(-6 * time.Second)
		ts1 := time.Now().Add(-5 * time.Second)
		ts2 := time.Now().Add(-4 * time.Second)

		rm := pmetric.NewResourceMetrics()
		ilm := rm.ScopeMetrics().AppendEmpty()
		ilm.Scope().SetName("test")

		appendDeltaNativeWithZeroThreshold(startTs, ts1, 0, 0, []uint64{2, 3}, 0, nil, 1, 6, 10.0, 1.0, ilm.Metrics())
		m2 := appendDeltaNativeWithZeroThreshold(ts1, ts2, 0, 0, []uint64{1, 2}, 0, nil, 2, 5, 8.0, 1.0, ilm.Metrics())

		a := newAccumulator(zap.NewNop(), 1*time.Hour).(*lastValueAccumulator)
		n := a.Accumulate(rm)
		require.Equal(t, 2, n)

		sig := timeseriesSignature(ilm.Scope().Name(), ilm.Scope().Version(), ilm.SchemaUrl(), ilm.Scope().Attributes(), ilm.Metrics().At(0), m2.ExponentialHistogram().DataPoints().At(0).Attributes(), pcommon.NewMap())
		got, ok := a.registeredMetrics.Load(sig)
		require.True(t, ok)
		dp := got.(*accumulatedValue).value.ExponentialHistogram().DataPoints().At(0)

		// Zero threshold should remain 1.0
		require.InDelta(t, 1.0, dp.ZeroThreshold(), 1e-12)

		// Zero count should just be the sum of original zero counts
		require.Equal(t, uint64(1+2), dp.ZeroCount())

		// Positive buckets should be merged normally
		require.Equal(t, int32(0), dp.Positive().Offset())
		require.Equal(t, 2, dp.Positive().BucketCounts().Len())
		require.Equal(t, uint64(3), dp.Positive().BucketCounts().At(0)) // 2+1
		require.Equal(t, uint64(5), dp.Positive().BucketCounts().At(1)) // 3+2

		// Total count
		require.Equal(t, uint64(11), dp.Count())

		// Sum
		require.InDelta(t, 18.0, dp.Sum(), 1e-12)
	})

	t.Run("AllBucketsMovedToZeroCount", func(t *testing.T) {
		// Test case where all buckets from one histogram get moved to zero count
		startTs := time.Now().Add(-6 * time.Second)
		ts1 := time.Now().Add(-5 * time.Second)
		ts2 := time.Now().Add(-4 * time.Second)

		rm := pmetric.NewResourceMetrics()
		ilm := rm.ScopeMetrics().AppendEmpty()
		ilm.Scope().SetName("test")

		// First histogram: small buckets, zero threshold = 0.1
		// Buckets: [1,2), [2,4) with counts [3, 2]
		appendDeltaNativeWithZeroThreshold(startTs, ts1, 0, 0, []uint64{3, 2}, 0, nil, 1, 6, 5.0, 0.1, ilm.Metrics())

		// Second histogram: higher zero threshold = 10.0, which is greater than all bucket upper bounds from first histogram
		// Buckets: [4,8), [8,16) with counts [1, 1]
		m2 := appendDeltaNativeWithZeroThreshold(ts1, ts2, 0, 2, []uint64{1, 1}, 0, nil, 5, 7, 12.0, 10.0, ilm.Metrics())

		a := newAccumulator(zap.NewNop(), 1*time.Hour).(*lastValueAccumulator)
		n := a.Accumulate(rm)
		require.Equal(t, 2, n)

		sig := timeseriesSignature(ilm.Scope().Name(), ilm.Scope().Version(), ilm.SchemaUrl(), ilm.Scope().Attributes(), ilm.Metrics().At(0), m2.ExponentialHistogram().DataPoints().At(0).Attributes(), pcommon.NewMap())
		got, ok := a.registeredMetrics.Load(sig)
		require.True(t, ok)
		dp := got.(*accumulatedValue).value.ExponentialHistogram().DataPoints().At(0)

		// Zero threshold should be 10.0
		require.InDelta(t, 10.0, dp.ZeroThreshold(), 1e-12)

		// Zero count should include all buckets from first histogram plus original zero counts
		// Original zero counts: 1 + 5 = 6, plus all buckets from first histogram: 3 + 2 = 5
		require.Equal(t, uint64(6+5), dp.ZeroCount())

		// Positive buckets should only include buckets from second histogram that weren't filtered
		require.Equal(t, int32(2), dp.Positive().Offset())
		require.Equal(t, 2, dp.Positive().BucketCounts().Len())
		require.Equal(t, uint64(1), dp.Positive().BucketCounts().At(0)) // bucket 2: [4,8)
		require.Equal(t, uint64(1), dp.Positive().BucketCounts().At(1)) // bucket 3: [8,16)

		// Total count
		require.Equal(t, uint64(13), dp.Count())

		// Sum
		require.InDelta(t, 17.0, dp.Sum(), 1e-12)
	})

	t.Run("LowerZeroThresholdHasNoBuckets", func(t *testing.T) {
		// Test edge case where histogram with lower zero threshold has no positive buckets
		startTs := time.Now().Add(-6 * time.Second)
		ts1 := time.Now().Add(-5 * time.Second)
		ts2 := time.Now().Add(-4 * time.Second)

		rm := pmetric.NewResourceMetrics()
		ilm := rm.ScopeMetrics().AppendEmpty()
		ilm.Scope().SetName("test")

		// First histogram: only zero count, no positive buckets
		appendDeltaNativeWithZeroThreshold(startTs, ts1, 0, 0, nil, 0, nil, 5, 5, 0.0, 0.1, ilm.Metrics())

		// Second histogram: has buckets, higher zero threshold
		m2 := appendDeltaNativeWithZeroThreshold(ts1, ts2, 0, 1, []uint64{2, 3}, 0, nil, 3, 8, 15.0, 2.0, ilm.Metrics())

		a := newAccumulator(zap.NewNop(), 1*time.Hour).(*lastValueAccumulator)
		n := a.Accumulate(rm)
		require.Equal(t, 2, n)

		sig := timeseriesSignature(ilm.Scope().Name(), ilm.Scope().Version(), ilm.SchemaUrl(), ilm.Scope().Attributes(), ilm.Metrics().At(0), m2.ExponentialHistogram().DataPoints().At(0).Attributes(), pcommon.NewMap())
		got, ok := a.registeredMetrics.Load(sig)
		require.True(t, ok)
		dp := got.(*accumulatedValue).value.ExponentialHistogram().DataPoints().At(0)

		// Zero threshold should be 2.0
		require.InDelta(t, 2.0, dp.ZeroThreshold(), 1e-12)

		// Zero count should be sum of original zero counts
		require.Equal(t, uint64(5+3), dp.ZeroCount())

		// Positive buckets should be from second histogram only
		require.Equal(t, int32(1), dp.Positive().Offset())
		require.Equal(t, 2, dp.Positive().BucketCounts().Len())
		require.Equal(t, uint64(2), dp.Positive().BucketCounts().At(0))
		require.Equal(t, uint64(3), dp.Positive().BucketCounts().At(1))

		// Total count
		require.Equal(t, uint64(13), dp.Count())

		// Sum
		require.InDelta(t, 15.0, dp.Sum(), 1e-12)
	})
}
(c *collector) convertMetric(metric pmetric.Metric, resourceAttrs pcommon.M return nil, errUnknownMetricType } +// defaultZeroThreshold matches the remote-write translator's default for native histograms +// when an explicit zero threshold is not provided in the datapoint. +const ( + defaultZeroThreshold = 1e-128 + cbnhScale = -53 +) + +func bucketsToNativeMap(buckets pmetric.ExponentialHistogramDataPointBuckets, scaleDown int32) map[int]int64 { + counts := buckets.BucketCounts() + if counts.Len() == 0 { + return nil + } + out := make(map[int]int64, counts.Len()) + baseOffset := buckets.Offset() + for i := 0; i < counts.Len(); i++ { + // Effective bucket index after downscaling: ((offset + i) >> scaleDown) + 1 + idx := (int32(i) + baseOffset) >> scaleDown + idx++ + out[int(idx)] += int64(counts.At(i)) + } + return out +} + +func (c *collector) convertExponentialHistogram(metric pmetric.Metric, resourceAttrs pcommon.Map, scopeName, scopeVersion, scopeSchemaURL string, scopeAttributes pcommon.Map) (prometheus.Metric, error) { + dp := metric.ExponentialHistogram().DataPoints().At(0) + + // Build metadata/labels first. 
+ desc, attributes, err := c.getMetricMetadata(metric, dto.MetricType_HISTOGRAM.Enum(), dp.Attributes(), resourceAttrs, scopeName, scopeVersion, scopeSchemaURL, scopeAttributes) + if err != nil { + return nil, err + } + + schema := dp.Scale() + + // TODO: implement custom bucket native histograms #43981 + if schema == cbnhScale { + return nil, errors.New("custom bucket native histograms (CBNH) are still not implemented") + } + if schema < -4 { + return nil, fmt.Errorf("cannot convert exponential to native histogram: scale must be >= -4, was %d", schema) + } + var scaleDown int32 + if schema > 8 { + scaleDown = schema - 8 + schema = 8 + } + + pos := bucketsToNativeMap(dp.Positive(), scaleDown) + neg := bucketsToNativeMap(dp.Negative(), scaleDown) + + zeroThresh := dp.ZeroThreshold() + if zeroThresh == 0 { + zeroThresh = defaultZeroThreshold + } + + // Use created timestamp if start time is set (> 0), else zero value. + created := time.Time{} + if dp.StartTimestamp().AsTime().Unix() > 0 { + created = dp.StartTimestamp().AsTime() + } + + sumVal := 0.0 + if dp.HasSum() { + sumVal = dp.Sum() + } + + m, err := prometheus.NewConstNativeHistogram( + desc, + dp.Count(), + sumVal, + pos, + neg, + dp.ZeroCount(), + schema, + zeroThresh, + created, + attributes..., + ) + if err != nil { + return nil, err + } + + if c.sendTimestamps { + return prometheus.NewMetricWithTimestamp(dp.Timestamp().AsTime(), m), nil + } + return m, nil +} + func (c *collector) getMetricMetadata(metric pmetric.Metric, mType *dto.MetricType, attributes, resourceAttrs pcommon.Map, scopeName, scopeVersion, scopeSchemaURL string, scopeAttributes pcommon.Map) (*prometheus.Desc, []string, error) { name, err := c.metricNamer.Build(prom.TranslatorMetricFromOtelMetric(metric)) if err != nil { diff --git a/exporter/prometheusexporter/collector_test.go b/exporter/prometheusexporter/collector_test.go index 828ec71c02335..1cbe17bb88e06 100644 --- a/exporter/prometheusexporter/collector_test.go +++ 
// TestAccumulateExponentialHistograms checks that exponential-histogram
// metrics emitted through Collect come out as Prometheus native histograms
// with the expected count/sum/schema/zero-count/zero-threshold, that scales
// above 8 are downscaled to schema 8, and that timestamp/created-timestamp
// handling follows the SendTimestamps setting.
func TestAccumulateExponentialHistograms(t *testing.T) {
	tests := []struct {
		name           string
		metric         func(time.Time, bool) pmetric.Metric
		wantCount      uint64
		wantSum        float64
		wantSchema     int32
		wantZeroCount  uint64
		wantZeroThresh float64
	}{
		{
			name:           "NativeHistogram basic (default zero threshold)",
			wantCount:      10,
			wantSum:        42.0,
			wantSchema:     0,
			wantZeroCount:  4,
			wantZeroThresh: defaultZeroThreshold,
			metric: func(ts time.Time, withStartTime bool) (metric pmetric.Metric) {
				metric = pmetric.NewMetric()
				metric.SetName("test_native_hist")
				metric.SetEmptyExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
				dp := metric.ExponentialHistogram().DataPoints().AppendEmpty()
				dp.SetScale(0)
				dp.Positive().SetOffset(0)
				dp.Positive().BucketCounts().FromRaw([]uint64{1, 2})
				dp.Negative().SetOffset(0)
				dp.Negative().BucketCounts().FromRaw([]uint64{3})
				dp.SetZeroCount(4)
				dp.SetCount(10)
				dp.SetSum(42.0)
				dp.SetZeroThreshold(0) // trigger defaultZeroThreshold
				dp.Attributes().PutStr("label_1", "1")
				dp.Attributes().PutStr("label_2", "2")
				dp.SetTimestamp(pcommon.NewTimestampFromTime(ts))
				if withStartTime {
					dp.SetStartTimestamp(pcommon.NewTimestampFromTime(ts))
				}

				return metric
			},
		},
		{
			name:           "NativeHistogram scale down (>8 to 8)",
			wantCount:      6,
			wantSum:        5.5,
			wantSchema:     8,
			wantZeroCount:  1,
			wantZeroThresh: 0.25,
			metric: func(ts time.Time, withStartTime bool) (metric pmetric.Metric) {
				metric = pmetric.NewMetric()
				metric.SetName("test_native_hist_scaled")
				metric.SetEmptyExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
				dp := metric.ExponentialHistogram().DataPoints().AppendEmpty()
				dp.SetScale(10) // will be downscaled to 8
				dp.Positive().SetOffset(0)
				dp.Positive().BucketCounts().FromRaw([]uint64{2, 3})
				dp.Negative().SetOffset(-1)
				dp.Negative().BucketCounts().FromRaw([]uint64{0, 0})
				dp.SetZeroCount(1)
				dp.SetCount(6)
				dp.SetSum(5.5)
				dp.SetZeroThreshold(0.25)
				dp.Attributes().PutStr("label_1", "1")
				dp.Attributes().PutStr("label_2", "2")
				dp.SetTimestamp(pcommon.NewTimestampFromTime(ts))
				if withStartTime {
					dp.SetStartTimestamp(pcommon.NewTimestampFromTime(ts))
				}
				return metric
			},
		},
	}

	for _, tt := range tests {
		for _, sendTimestamp := range []bool{true, false} {
			name := tt.name
			if sendTimestamp {
				name += "/WithTimestamp"
			}
			t.Run(name, func(t *testing.T) {
				ts := time.Now()
				metric := tt.metric(ts, sendTimestamp)
				c := newCollector(&Config{SendTimestamps: sendTimestamp}, zap.NewNop())
				// Replace accumulator with mock for test control
				c.accumulator = &mockAccumulator{
					[]pmetric.Metric{metric},
					pcommon.NewMap(),
					[]string{""},
					[]string{""},
					[]string{""},
					[]pcommon.Map{pcommon.NewMap()},
				}

				ch := make(chan prometheus.Metric, 1)
				go func() {
					c.Collect(ch)
					close(ch)
				}()

				n := 0
				for m := range ch {
					n++
					require.Contains(t, m.Desc().String(), "fqName: \""+metric.Name()+"\"")

					pbMetric := io_prometheus_client.Metric{}
					require.NoError(t, m.Write(&pbMetric))

					// Assert timestamp behavior
					if sendTimestamp {
						require.Equal(t, ts.UnixNano()/1e6, *(pbMetric.TimestampMs))
						// withStartTime is tied to sendTimestamp in this test
						require.Equal(t, timestamppb.New(ts), pbMetric.Histogram.CreatedTimestamp)
					} else {
						require.Nil(t, pbMetric.TimestampMs)
						// Native histograms always include CreatedTimestamp; when no start
						// time is set, it encodes the zero time (0001-01-01) rather than nil.
						require.NotNil(t, pbMetric.Histogram.CreatedTimestamp)
						require.True(t, pbMetric.Histogram.CreatedTimestamp.AsTime().IsZero())
					}

					h := pbMetric.Histogram
					require.NotNil(t, h)
					require.Equal(t, tt.wantCount, h.GetSampleCount())
					require.InDelta(t, tt.wantSum, h.GetSampleSum(), 1e-12)
					require.Equal(t, tt.wantSchema, h.GetSchema())
					require.Equal(t, tt.wantZeroCount, h.GetZeroCount())
					require.InDelta(t, tt.wantZeroThresh, h.GetZeroThreshold(), 0)
				}
				require.Equal(t, 1, n)
			})
		}
	}
}

// TestConvertExponentialHistogramInvalidScale verifies that a datapoint scale
// below -4 is rejected with a descriptive error instead of being converted.
func TestConvertExponentialHistogramInvalidScale(t *testing.T) {
	metric := pmetric.NewMetric()
	metric.SetName("invalid_native_hist")
	metric.SetEmptyExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	dp := metric.ExponentialHistogram().DataPoints().AppendEmpty()
	dp.SetScale(-5) // invalid: must be >= -4
	dp.SetCount(0)
	dp.SetZeroThreshold(0)

	c := newCollector(&Config{}, zap.NewNop())
	_, err := c.convertExponentialHistogram(metric, pcommon.NewMap(), "", "", "", pcommon.NewMap())
	require.Error(t, err)
	require.Contains(t, err.Error(), "scale must be >= -4")
}
++++--- .../unmarshaler/elb-access-log/elb_test.go | 81 +++++++++++++++++++ 3 files changed, 126 insertions(+), 10 deletions(-) create mode 100644 .chloggen/issue-44233-parse-invalid-request-in-elbaccess-logs.yaml diff --git a/.chloggen/issue-44233-parse-invalid-request-in-elbaccess-logs.yaml b/.chloggen/issue-44233-parse-invalid-request-in-elbaccess-logs.yaml new file mode 100644 index 0000000000000..d21c0e601457f --- /dev/null +++ b/.chloggen/issue-44233-parse-invalid-request-in-elbaccess-logs.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. receiver/filelog) +component: extension/awslogs_encoding + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: "Fix ALB log `request_line` parsing for valid formats and avoid errors" + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [44233] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [] diff --git a/extension/encoding/awslogsencodingextension/internal/unmarshaler/elb-access-log/elb.go b/extension/encoding/awslogsencodingextension/internal/unmarshaler/elb-access-log/elb.go index 50ae9d25125be..5fc84c8a52bba 100644 --- a/extension/encoding/awslogsencodingextension/internal/unmarshaler/elb-access-log/elb.go +++ b/extension/encoding/awslogsencodingextension/internal/unmarshaler/elb-access-log/elb.go @@ -488,20 +488,23 @@ func extractFields(logLine string) ([]string, error) { func parseRequestField(raw string) (method, uri, protoName, protoVersion string, err error) { method, remaining, _ := strings.Cut(raw, " ") if method == "" { - err = fmt.Errorf("unexpected: request field %q has no method", raw) + err = fmt.Errorf("unexpected: field %q has no method section", raw) return method, uri, protoName, protoVersion, err } - uri, remaining, _ = strings.Cut(remaining, " ") - if uri == "" { - err = fmt.Errorf("unexpected: request field %q has no URI", raw) - return method, uri, protoName, protoVersion, err - } + var protocol string - protocol, leftover, _ := strings.Cut(remaining, " ") - if protocol == "" || leftover != "" { - err = fmt.Errorf(`request field %q does not match expected format " "`, raw) + index := strings.LastIndex(remaining, " ") + switch { + case index == -1: + err = fmt.Errorf("unexpected: field %q has no protocol/version section", raw) return method, uri, protoName, protoVersion, err + case index == len(remaining)-1: + uri = strings.TrimSpace(remaining) + protocol = unknownField + default: + uri = remaining[:index] + protocol = remaining[index+1:] } protoName, protoVersion, err = netProtocol(protocol) @@ -509,11 +512,16 @@ func parseRequestField(raw string) (method, uri, protoName, protoVersion string, err = fmt.Errorf("invalid protocol in request field: %w", err) return method, uri, protoName, protoVersion, err } - return method, uri, protoName, protoVersion, err + + return method, uri, protoName, 
protoVersion, nil } // netProtocol returns protocol name and version based on proto value func netProtocol(proto string) (string, string, error) { + if proto == unknownField { + return unknownField, unknownField, nil + } + name, version, found := strings.Cut(proto, "/") if !found || name == "" || version == "" { return "", "", errors.New(`request uri protocol does not follow expected scheme "/"`) diff --git a/extension/encoding/awslogsencodingextension/internal/unmarshaler/elb-access-log/elb_test.go b/extension/encoding/awslogsencodingextension/internal/unmarshaler/elb-access-log/elb_test.go index b0af3dcd04185..0546ec0545888 100644 --- a/extension/encoding/awslogsencodingextension/internal/unmarshaler/elb-access-log/elb_test.go +++ b/extension/encoding/awslogsencodingextension/internal/unmarshaler/elb-access-log/elb_test.go @@ -59,3 +59,84 @@ func Test_scanField(t *testing.T) { }) } } + +func Test_parseRequestField(t *testing.T) { + tests := []struct { + name string + input string + wantMethod string + wantURI string + wantProtoName string + wantProtoVersion string + wantErr bool + }{ + { + name: "Valid input with expected sections", + input: "GET http://example.com/ HTTP/1.1", + wantMethod: "GET", + wantURI: "http://example.com/", + wantProtoName: "http", + wantProtoVersion: "1.1", + }, + { + name: "Missing protocol/version", + input: "GET http://example.com/ -", + wantMethod: "GET", + wantURI: "http://example.com/", + wantProtoName: "-", + wantProtoVersion: "-", + }, + { + name: "URI section with spaces", + input: "GET http://example.com/path to somewhere HTTP/1.1", + wantMethod: "GET", + wantURI: "http://example.com/path to somewhere", + wantProtoName: "http", + wantProtoVersion: "1.1", + }, + { + name: "Input with spaces and missing protocol/version", + input: "- http://example.com/path to somewhere- -", + wantMethod: "-", + wantURI: "http://example.com/path to somewhere-", + wantProtoName: "-", + wantProtoVersion: "-", + }, + { + name: "Missing method and 
protocol/version", + input: "- http://example.com:80- ", + wantErr: false, + wantMethod: "-", + wantURI: "http://example.com:80-", + wantProtoName: "-", + wantProtoVersion: "-", + }, + { + name: "Invalid input with missing expected sections", + input: "GET /", + wantMethod: "GET", + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotMethod, gotURI, gotProtoName, gotProtoVersion, err := parseRequestField(tt.input) + if (err != nil) != tt.wantErr { + t.Errorf("parseRequestField() error = %v, wantErr %v", err, tt.wantErr) + return + } + if gotMethod != tt.wantMethod { + t.Errorf("parseRequestField() gotMethod = %v, want %v", gotMethod, tt.wantMethod) + } + if gotURI != tt.wantURI { + t.Errorf("parseRequestField() gotURI = %v, want %v", gotURI, tt.wantURI) + } + if gotProtoName != tt.wantProtoName { + t.Errorf("parseRequestField() gotProtoName = %v, want %v", gotProtoName, tt.wantProtoName) + } + if gotProtoVersion != tt.wantProtoVersion { + t.Errorf("parseRequestField() gotProtoVersion = %v, want %v", gotProtoVersion, tt.wantProtoVersion) + } + }) + } +} From 1fa266eaeeec65eea7e0218815b72a1712fdd7c7 Mon Sep 17 00:00:00 2001 From: Isaac Flores <34590010+isaacaflores2@users.noreply.github.com> Date: Wed, 26 Nov 2025 23:56:50 -0800 Subject: [PATCH 21/41] [exporter/elasticsearch] Update ecs mode span encoder to include `span.kind` attribute (#44139) #### Description `span.kind` is missing from indexed documents when the mapping-mode is set to `ecs`. This PR adds adds the missing attribute encoding. #### Link to tracking issue Fixes n/a #### Testing 1. 
Updated unit-test #### Documentation --------- Co-authored-by: Carson Ip --- .chloggen/es-exporter-ecs-span-kind.yaml | 27 ++++++++++++++++++++ exporter/elasticsearchexporter/model.go | 21 +++++++++++++++ exporter/elasticsearchexporter/model_test.go | 1 + 3 files changed, 49 insertions(+) create mode 100644 .chloggen/es-exporter-ecs-span-kind.yaml diff --git a/.chloggen/es-exporter-ecs-span-kind.yaml b/.chloggen/es-exporter-ecs-span-kind.yaml new file mode 100644 index 0000000000000..a25943c01b812 --- /dev/null +++ b/.chloggen/es-exporter-ecs-span-kind.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. receiver/filelog) +component: exporter/elasticsearch + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Updates the ecs mode span encode to include the `span.kind` attribute + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [44139] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [user] diff --git a/exporter/elasticsearchexporter/model.go b/exporter/elasticsearchexporter/model.go index d4136e7db1fb4..ad7872c6bf5a5 100644 --- a/exporter/elasticsearchexporter/model.go +++ b/exporter/elasticsearchexporter/model.go @@ -289,10 +289,31 @@ func (ecsModeEncoder) encodeSpan( document.AddString("event.outcome", "failure") } document.AddLinks("span.links", span.Links()) + if spanKind := spanKindToECSStr(span.Kind()); spanKind != "" { + document.AddString("span.kind", spanKind) + } return document.Serialize(buf, true) } +// spanKindToECSStr converts an OTel SpanKind to its ECS equivalent string representation defined here: +// https://github.com/elastic/apm-data/blob/main/input/elasticapm/internal/modeldecoder/v2/decoder.go#L1665-L1669 +func spanKindToECSStr(sk ptrace.SpanKind) string { + switch sk { + case ptrace.SpanKindInternal: + return "INTERNAL" + case ptrace.SpanKindServer: + return "SERVER" + case ptrace.SpanKindClient: + return "CLIENT" + case ptrace.SpanKindProducer: + return "PRODUCER" + case ptrace.SpanKindConsumer: + return "CONSUMER" + } + return "" +} + func (e otelModeEncoder) encodeLog( ec encodingContext, record plog.LogRecord, diff --git a/exporter/elasticsearchexporter/model_test.go b/exporter/elasticsearchexporter/model_test.go index a9148f71837f9..4788bcc1211da 100644 --- a/exporter/elasticsearchexporter/model_test.go +++ b/exporter/elasticsearchexporter/model_test.go @@ -533,6 +533,7 @@ func TestEncodeSpanECSMode(t *testing.T) { "id": "1920212223242526", "name": "client span", "action": "receive", + "kind": "CLIENT", "db": { "instance": "users", "statement": "SELECT * FROM users WHERE user_id=?", From 3a7efaccb2442bd2d380a8ed7e7b71142bc7a811 Mon Sep 17 00:00:00 2001 From: Otavio Date: Thu, 27 Nov 2025 04:57:18 -0300 Subject: [PATCH 22/41] Add zstd compression on exporter/aws3 (#44542) #### Description Add support for `zstd` compression on `exporter/awss3` #### Testing Automated tests passed. 
Manual validation, downloaded files from S3 and decompressed using `zstd` CLI. #### Documentation Updated the existent documentation to add `zstd` option as compression. --- .chloggen/zstdCompression.yaml | 27 ++++++++++++++++ exporter/awss3exporter/README.md | 1 + exporter/awss3exporter/config.go | 4 +-- exporter/awss3exporter/config_test.go | 19 +++++++++++ exporter/awss3exporter/go.mod | 1 + .../internal/upload/partition.go | 1 + .../awss3exporter/internal/upload/writer.go | 17 ++++++++++ .../internal/upload/writer_test.go | 32 ++++++++++++++++++- .../awss3exporter/testdata/compression.yaml | 6 ++++ 9 files changed, 105 insertions(+), 3 deletions(-) create mode 100644 .chloggen/zstdCompression.yaml diff --git a/.chloggen/zstdCompression.yaml b/.chloggen/zstdCompression.yaml new file mode 100644 index 0000000000000..d3a60562044e9 --- /dev/null +++ b/.chloggen/zstdCompression.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. receiver/filelog) +component: exporter/awss3 + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Support compression with ZSTD + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [44542] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. 
+# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/exporter/awss3exporter/README.md b/exporter/awss3exporter/README.md index e57a86e6bfc7c..c58c51d71eff3 100644 --- a/exporter/awss3exporter/README.md +++ b/exporter/awss3exporter/README.md @@ -83,6 +83,7 @@ See https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/ ### Compression - `none` (default): No compression will be applied - `gzip`: Files will be compressed with gzip. +- `zstd`: Files will be compressed with zstd. ### resource_attrs_to_s3 - `s3_bucket`: Defines which resource attribute's value should be used as the S3 bucket. diff --git a/exporter/awss3exporter/config.go b/exporter/awss3exporter/config.go index 68348462cecd0..9641a502d7486 100644 --- a/exporter/awss3exporter/config.go +++ b/exporter/awss3exporter/config.go @@ -49,7 +49,7 @@ type S3UploaderConfig struct { StorageClass string `mapstructure:"storage_class"` // Compression sets the algorithm used to process the payload // before uploading to S3. - // Valid values are: `gzip` or no value set. + // Valid values are: `gzip`, `zstd`, or no value set. Compression configcompression.Type `mapstructure:"compression"` // RetryMode specifies the retry mode for S3 client, default is "standard". 
@@ -143,7 +143,7 @@ func (c *Config) Validate() error { compression := c.S3Uploader.Compression if compression.IsCompressed() { - if compression != configcompression.TypeGzip { + if compression != configcompression.TypeGzip && compression != configcompression.TypeZstd { errs = multierr.Append(errs, errors.New("unknown compression type")) } } diff --git a/exporter/awss3exporter/config_test.go b/exporter/awss3exporter/config_test.go index ad252fc39ab19..b37c05d8c1251 100644 --- a/exporter/awss3exporter/config_test.go +++ b/exporter/awss3exporter/config_test.go @@ -440,6 +440,25 @@ func TestCompressionName(t *testing.T) { MarshalerName: "otlp_proto", }, e, ) + + e = cfg.Exporters[component.MustNewIDWithName("awss3", "zstd")].(*Config) + + assert.Equal(t, &Config{ + QueueSettings: queueCfg, + TimeoutSettings: timeoutCfg, + S3Uploader: S3UploaderConfig{ + Region: "us-east-1", + S3Bucket: "bar", + S3PartitionFormat: "year=%Y/month=%m/day=%d/hour=%H/minute=%M", + Compression: "zstd", + StorageClass: "STANDARD", + RetryMode: DefaultRetryMode, + RetryMaxAttempts: DefaultRetryMaxAttempts, + RetryMaxBackoff: DefaultRetryMaxBackoff, + }, + MarshalerName: "otlp_json", + }, e, + ) } func TestResourceAttrsToS3(t *testing.T) { diff --git a/exporter/awss3exporter/go.mod b/exporter/awss3exporter/go.mod index 2fa802bc3fe46..b82b08edaa92d 100644 --- a/exporter/awss3exporter/go.mod +++ b/exporter/awss3exporter/go.mod @@ -11,6 +11,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/sts v1.41.1 github.com/google/uuid v1.6.0 github.com/itchyny/timefmt-go v0.1.7 + github.com/klauspost/compress v1.18.1 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.140.1 github.com/stretchr/testify v1.11.1 github.com/tilinna/clock v1.1.0 diff --git a/exporter/awss3exporter/internal/upload/partition.go b/exporter/awss3exporter/internal/upload/partition.go index 8af9ba0a120de..9e15e41e604d9 100644 --- a/exporter/awss3exporter/internal/upload/partition.go +++ 
b/exporter/awss3exporter/internal/upload/partition.go @@ -17,6 +17,7 @@ import ( var compressionFileExtensions = map[configcompression.Type]string{ configcompression.TypeGzip: ".gz", + configcompression.TypeZstd: ".zst", } type PartitionKeyBuilder struct { diff --git a/exporter/awss3exporter/internal/upload/writer.go b/exporter/awss3exporter/internal/upload/writer.go index f17ae48219291..a2f1e81603218 100644 --- a/exporter/awss3exporter/internal/upload/writer.go +++ b/exporter/awss3exporter/internal/upload/writer.go @@ -12,6 +12,7 @@ import ( "github.com/aws/aws-sdk-go-v2/feature/s3/manager" "github.com/aws/aws-sdk-go-v2/service/s3" s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/klauspost/compress/zstd" "github.com/tilinna/clock" "go.opentelemetry.io/collector/config/configcompression" ) @@ -112,6 +113,22 @@ func (sw *s3manager) contentBuffer(raw []byte) (*bytes.Buffer, error) { return nil, err } + return content, nil + case configcompression.TypeZstd: + content := bytes.NewBuffer(nil) + zipper, err := zstd.NewWriter(content) + if err != nil { + return nil, err + } + _, err = zipper.Write(raw) + if err != nil { + return nil, err + } + err = zipper.Close() + if err != nil { + return nil, err + } + return content, nil default: return bytes.NewBuffer(raw), nil diff --git a/exporter/awss3exporter/internal/upload/writer_test.go b/exporter/awss3exporter/internal/upload/writer_test.go index e62916451984b..d6de3cb49ea2f 100644 --- a/exporter/awss3exporter/internal/upload/writer_test.go +++ b/exporter/awss3exporter/internal/upload/writer_test.go @@ -14,6 +14,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/s3" s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/klauspost/compress/zstd" "github.com/stretchr/testify/assert" "github.com/tilinna/clock" "go.opentelemetry.io/collector/config/configcompression" @@ -66,7 +67,7 @@ func TestS3ManagerUpload(t *testing.T) { uploadOpts: nil, }, { - name: 
"successful compression upload", + name: "successful compression upload gzip", handler: func(t *testing.T) http.Handler { return http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) { assert.Equal( @@ -94,6 +95,35 @@ func TestS3ManagerUpload(t *testing.T) { errVal: "", uploadOpts: nil, }, + { + name: "successful compression upload zstd", + handler: func(t *testing.T) http.Handler { + return http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) { + assert.Equal( + t, + "/my-bucket/telemetry/year=2024/month=01/day=10/hour=10/minute=30/signal-data-noop_random.metrics.zst", + r.URL.Path, + "Must match the expected path", + ) + + reader, err := zstd.NewReader(r.Body) + if !assert.NoError(t, err, "Must not error creating zstd reader") { + return + } + + data, err := io.ReadAll(reader) + assert.Equal(t, []byte("hello world"), data, "Must match the expected data") + assert.NoError(t, err, "Must not error reading data from reader") + + reader.Close() + _ = r.Body.Close() + }) + }, + compression: configcompression.TypeZstd, + data: []byte("hello world"), + errVal: "", + uploadOpts: nil, + }, { name: "no data upload", handler: func(t *testing.T) http.Handler { diff --git a/exporter/awss3exporter/testdata/compression.yaml b/exporter/awss3exporter/testdata/compression.yaml index 68966317b9617..96fad8ed453a4 100644 --- a/exporter/awss3exporter/testdata/compression.yaml +++ b/exporter/awss3exporter/testdata/compression.yaml @@ -14,6 +14,12 @@ exporters: compression: "none" marshaler: otlp_proto + awss3/zstd: + s3uploader: + s3_bucket: "bar" + compression: "zstd" + marshaler: otlp_json + processors: nop: From c00788f9fa2ed30f8b4531110d66b035cd8012b9 Mon Sep 17 00:00:00 2001 From: Alex Van Boxel Date: Thu, 27 Nov 2025 08:59:36 +0100 Subject: [PATCH 23/41] [exporter/googlecloudpubsub] Add encoding extension support (#44544) #### Description Add encoding extension support for the payload on Pub/Sub. 
As having custom extensions means the Pub/Sub attributes cannot be auto discovered additional functionality has been added to set the message attributes. #### Link to tracking issue Fixes * https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/42270 * https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/41834 #### Testing Added new test testing the new functionality. Also tested end-to-end (with the pub/sub receiver) #### Documentation Added section in the README.md describing the extra functionality and options in the config --- ...cloudpubsub-exporter-support-encoding.yaml | 29 ++ exporter/googlecloudpubsubexporter/README.md | 38 +- exporter/googlecloudpubsubexporter/config.go | 15 + .../googlecloudpubsubexporter/exporter.go | 197 +++++++++-- .../exporter_test.go | 332 +++++++++++++++++- exporter/googlecloudpubsubexporter/factory.go | 21 +- exporter/googlecloudpubsubexporter/go.mod | 7 +- exporter/googlecloudpubsubexporter/go.sum | 6 +- 8 files changed, 587 insertions(+), 58 deletions(-) create mode 100644 .chloggen/googlecloudpubsub-exporter-support-encoding.yaml diff --git a/.chloggen/googlecloudpubsub-exporter-support-encoding.yaml b/.chloggen/googlecloudpubsub-exporter-support-encoding.yaml new file mode 100644 index 0000000000000..3dfdc5bdb852d --- /dev/null +++ b/.chloggen/googlecloudpubsub-exporter-support-encoding.yaml @@ -0,0 +1,29 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: exporter/googlecloudpubsub + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add encoding extension support + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. 
+issues: [42270,41834] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: | + Add encoding extension support for the payload on Pub/Sub. As having custom extensions means the Pub/Sub attributes + cannot be auto discovered additional functionality has been added to set the message attributes. + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/exporter/googlecloudpubsubexporter/README.md b/exporter/googlecloudpubsubexporter/README.md index 4ea4162b197e2..83cbc42ceb9c4 100644 --- a/exporter/googlecloudpubsubexporter/README.md +++ b/exporter/googlecloudpubsubexporter/README.md @@ -30,7 +30,7 @@ The following configuration options are supported: drift is set to 0, the maximum drift from the clock is allowed (only applicable to `earliest`). * `endpoint` (Optional): Override the default Pubsub Endpoint, useful when connecting to the PubSub emulator instance or switching between [global and regional service endpoints](https://cloud.google.com/pubsub/docs/reference/service_apis_overview#service_endpoints). -* `insecure` (Optional): allows performing β€œinsecure” SSL connections and transfers, useful when connecting to a local +* `insecure` (Optional): Allows performing β€œinsecure” SSL connections and transfers, useful when connecting to a local emulator instance. 
Only has effect if Endpoint is not "" * `ordering`: Configures the [PubSub ordering](https://cloud.google.com/pubsub/docs/ordering) feature, see [ordering](#ordering) section for more info. @@ -40,6 +40,11 @@ The following configuration options are supported: ordered for this resource. * `remove_resource_attribute` (default = `false`): if the ordering key resource attribute specified `from_resource_attribute` should be removed from the resource attributes. +* `traces`, `metrics` and `logs` (Optional): Allows overriding the standard OTLP Protobuf + [encoding and the message](#encoding-and-message-attributes) + attributes. + * `encoding` (Optional): An encoding extension, if not specified it uses the default Protobuf marshaller. + * `attributes` (Optional): Attributes that will be added to the Pub/Sub message. ```yaml exporters: @@ -160,3 +165,34 @@ for more details. PubSub requires one publish request per ordering key value, so this exporter groups the signals per ordering key before publishing. + +## Encoding and message attributes + +The `traces`, `metrics` and `logs` section allows you to specify Encoding Extensions for marshalling the messages on +the topic and the attributes on the Pub/Sub message. All the signals have the same config options. + +It's important to note that when you use an extension all the CloudEvent attributes are removed as you use your own +encoder as the exporter can't know what value to set. You have the opportunity to manually set them. + +```yaml +extensions: + otlp_encoding: + protocol: otlp_json + +exporters: + googlecloudpubsub: + project: my-project + topic: projects/my-project/topics/otlp-traces + traces: + encoding: otlp_encoding + attributes: + "ce-type": "org.opentelemetry.otlp.traces.v1" + "content-type": "application/json" +``` + +The `encoding` option allows you to specify Encoding Extensions for marshalling the messages on the topic. 
An +extension needs to be configured in the `extensions` section, and added to the pipeline in the collector's configuration file. + +The `attributes` option allows you to set any attributes; the values are key/value pairs. You can avoid the removal of +CloudEvent attributes if you manually specify the `ce-type` and `content-type` to an appropriate value for the chosen +encoding. \ No newline at end of file diff --git a/exporter/googlecloudpubsubexporter/config.go index 5fb091d8bc51f..c67d12825797e 100644 --- a/exporter/googlecloudpubsubexporter/config.go +++ b/exporter/googlecloudpubsubexporter/config.go @@ -9,6 +9,7 @@ import ( "regexp" "time" + "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configretry" "go.opentelemetry.io/collector/exporter/exporterhelper" "go.uber.org/multierr" @@ -38,6 +39,12 @@ type Config struct { Watermark WatermarkConfig `mapstructure:"watermark"` // Ordering configures the ordering keys Ordering OrderingConfig `mapstructure:"ordering"` + // LogsSignalConfig allows for custom log configuration + LogsSignalConfig SignalConfig `mapstructure:"logs"` + // MetricsSignalConfig allows for custom metrics configuration + MetricsSignalConfig SignalConfig `mapstructure:"metrics"` + // TracesSignalConfig allows for custom traces configuration + TracesSignalConfig SignalConfig `mapstructure:"traces"` } // WatermarkConfig customizes the behavior of the watermark @@ -60,6 +67,14 @@ type OrderingConfig struct { RemoveResourceAttribute bool `mapstructure:"remove_resource_attribute"` } +// SignalConfig holds signal-specific configuration for the Pub/Sub exporter. 
+type SignalConfig struct { + // Encoding is a custom encoding for the marshaling the data onto the message + Encoding component.ID `mapstructure:"encoding"` + // Attributes are custom Pub/Sub message attributes + Attributes map[string]string `mapstructure:"attributes"` +} + func (config *Config) Validate() error { var errors error if !topicMatcher.MatchString(config.Topic) { diff --git a/exporter/googlecloudpubsubexporter/exporter.go b/exporter/googlecloudpubsubexporter/exporter.go index 7fa0e5ba445b8..09d49e997362c 100644 --- a/exporter/googlecloudpubsubexporter/exporter.go +++ b/exporter/googlecloudpubsubexporter/exporter.go @@ -17,6 +17,8 @@ import ( "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/extension/encoding" ) type pubsubExporter struct { @@ -28,10 +30,13 @@ type pubsubExporter struct { ceCompression compression config *Config tracesMarshaler ptrace.Marshaler + tracesAttributes map[string]string tracesWatermarkFunc tracesWatermarkFunc metricsMarshaler pmetric.Marshaler + metricsAttributes map[string]string metricsWatermarkFunc metricsWatermarkFunc logsMarshaler plog.Marshaler + logsAttributes map[string]string logsWatermarkFunc logsWatermarkFunc // To be overridden in tests @@ -39,12 +44,12 @@ type pubsubExporter struct { makeClient func(ctx context.Context, cfg *Config, userAgent string) (publisherClient, error) } -type encoding int +type signal int const ( - otlpProtoTrace encoding = iota - otlpProtoMetric = iota - otlpProtoLog = iota + signalTrace signal = iota + signalMetric = iota + signalLog = iota ) type compression int @@ -61,16 +66,27 @@ const ( earliest = iota ) -func (ex *pubsubExporter) start(ctx context.Context, _ component.Host) error { +func (ex *pubsubExporter) start(ctx context.Context, host component.Host) error { ctx, ex.cancel = context.WithCancel(ctx) + var err error + err = ex.initTraces(host) + if err != nil 
{ + return err + } + err = ex.initMetrics(host) + if err != nil { + return err + } + err = ex.initLogs(host) + if err != nil { + return err + } if ex.client == nil { - client, err := ex.makeClient(ctx, ex.config, ex.userAgent) + ex.client, err = ex.makeClient(ctx, ex.config, ex.userAgent) if err != nil { return fmt.Errorf("failed creating the gRPC client to Pubsub: %w", err) } - - ex.client = client } return nil } @@ -85,37 +101,140 @@ func (ex *pubsubExporter) shutdown(_ context.Context) error { return client.Close() } -func (ex *pubsubExporter) getMessageAttributes(encoding encoding, watermark time.Time) (map[string]string, error) { - id, err := ex.makeUUID() - if err != nil { - return nil, err +func (ex *pubsubExporter) initTraces(host component.Host) error { + var err error + signalConfig := ex.config.TracesSignalConfig + if signalConfig.Encoding.String() != "" { + err = ex.setTracesMarshalerFromExtension(host, signalConfig.Encoding) + if err != nil { + return err + } + } else { + ex.tracesMarshaler = &ptrace.ProtoMarshaler{} + ex.tracesAttributes["ce-type"] = "org.opentelemetry.otlp.traces.v1" + ex.tracesAttributes["content-type"] = "application/protobuf" } - ceTime, err := watermark.MarshalText() - if err != nil { - return nil, err - } - - attributes := map[string]string{ - "ce-specversion": "1.0", - "ce-id": id.String(), - "ce-source": ex.ceSource, - "ce-time": string(ceTime), - } - switch encoding { - case otlpProtoTrace: - attributes["ce-type"] = "org.opentelemetry.otlp.traces.v1" - attributes["content-type"] = "application/protobuf" - case otlpProtoMetric: - attributes["ce-type"] = "org.opentelemetry.otlp.metrics.v1" - attributes["content-type"] = "application/protobuf" - case otlpProtoLog: - attributes["ce-type"] = "org.opentelemetry.otlp.logs.v1" - attributes["content-type"] = "application/protobuf" + if len(signalConfig.Attributes) > 0 { + for k, v := range signalConfig.Attributes { + ex.tracesAttributes[k] = v + } } - if ex.ceCompression == gZip { - 
attributes["content-encoding"] = "gzip" + return nil +} + +func (ex *pubsubExporter) initMetrics(host component.Host) error { + var err error + signalConfig := ex.config.MetricsSignalConfig + if signalConfig.Encoding.String() != "" { + err = ex.setMetricsMarshalerFromExtension(host, signalConfig.Encoding) + if err != nil { + return err + } + } else { + ex.metricsMarshaler = &pmetric.ProtoMarshaler{} + ex.metricsAttributes["ce-type"] = "org.opentelemetry.otlp.metrics.v1" + ex.metricsAttributes["content-type"] = "application/protobuf" + } + if len(signalConfig.Attributes) > 0 { + for k, v := range signalConfig.Attributes { + ex.metricsAttributes[k] = v + } + } + return nil +} + +func (ex *pubsubExporter) initLogs(host component.Host) error { + var err error + signalConfig := ex.config.LogsSignalConfig + if signalConfig.Encoding.String() != "" { + err = ex.setLogsMarshalerFromExtension(host, signalConfig.Encoding) + if err != nil { + return err + } + } else { + ex.logsMarshaler = &plog.ProtoMarshaler{} + ex.logsAttributes["ce-type"] = "org.opentelemetry.otlp.logs.v1" + ex.logsAttributes["content-type"] = "application/protobuf" + } + if len(signalConfig.Attributes) > 0 { + for k, v := range signalConfig.Attributes { + ex.logsAttributes[k] = v + } + } + return nil +} + +func (ex *pubsubExporter) setTracesMarshalerFromExtension(host component.Host, extensionID component.ID) error { + extensions := host.GetExtensions() + if extension, ok := extensions[extensionID]; ok { + ex.tracesMarshaler, ok = extension.(encoding.TracesMarshalerExtension) + if !ok { + return fmt.Errorf("cannot start receiver: extension %q is not a trace unmarshaler", extensionID) + } + } else { + return fmt.Errorf("cannot start receiver: extension %q not found for traces", extensionID) + } + return nil +} + +func (ex *pubsubExporter) setMetricsMarshalerFromExtension(host component.Host, extensionID component.ID) error { + extensions := host.GetExtensions() + if extension, ok := extensions[extensionID]; 
ok { + ex.metricsMarshaler, ok = extension.(encoding.MetricsMarshalerExtension) + if !ok { + return fmt.Errorf("cannot start receiver: extension %q is not a metric unmarshaler", extensionID) + } + } else { + return fmt.Errorf("cannot start receiver: extension %q not found for metrics", extensionID) + } + return nil +} + +func (ex *pubsubExporter) setLogsMarshalerFromExtension(host component.Host, extensionID component.ID) error { + extensions := host.GetExtensions() + if extension, ok := extensions[extensionID]; ok { + ex.logsMarshaler, ok = extension.(encoding.LogsMarshalerExtension) + if !ok { + return fmt.Errorf("cannot start receiver: extension %q is not a log unmarshaler", extensionID) + } + } else { + return fmt.Errorf("cannot start receiver: extension %q not found for logs", extensionID) + } + return nil +} + +func (ex *pubsubExporter) getMessageAttributes(signal signal, watermark time.Time) (map[string]string, error) { + attributes := map[string]string{} + var source map[string]string + switch signal { + case signalTrace: + source = ex.tracesAttributes + case signalMetric: + source = ex.metricsAttributes + case signalLog: + source = ex.logsAttributes + } + for k, v := range source { + attributes[k] = v + } + if attributes["ce-type"] != "" { + id, err := ex.makeUUID() + if err != nil { + return nil, err + } + ceTime, err := watermark.MarshalText() + if err != nil { + return nil, err + } + attributes["ce-specversion"] = "1.0" + attributes["ce-id"] = id.String() + attributes["ce-source"] = ex.ceSource + attributes["ce-time"] = string(ceTime) + if ex.ceCompression == gZip { + attributes["content-encoding"] = "gzip" + } } - return attributes, err + return attributes, nil } func (ex *pubsubExporter) consumeTraces(ctx context.Context, traces ptrace.Traces) error { @@ -160,7 +279,7 @@ func (ex *pubsubExporter) consumeTraces(ctx context.Context, traces ptrace.Trace func (ex *pubsubExporter) publishTraces(ctx context.Context, tracesForKey ptrace.Traces, orderingKey 
string) error { watermark := ex.tracesWatermarkFunc(tracesForKey, time.Now(), ex.config.Watermark.AllowedDrift).UTC() - attributes, attributesErr := ex.getMessageAttributes(otlpProtoTrace, watermark) + attributes, attributesErr := ex.getMessageAttributes(signalTrace, watermark) if attributesErr != nil { return fmt.Errorf("error while preparing pubsub message attributes: %w", attributesErr) } @@ -215,7 +334,7 @@ func (ex *pubsubExporter) consumeMetrics(ctx context.Context, metrics pmetric.Me func (ex *pubsubExporter) publishMetrics(ctx context.Context, metricsForKey pmetric.Metrics, orderingKey string) error { watermark := ex.metricsWatermarkFunc(metricsForKey, time.Now(), ex.config.Watermark.AllowedDrift).UTC() - attributes, attributesErr := ex.getMessageAttributes(otlpProtoMetric, watermark) + attributes, attributesErr := ex.getMessageAttributes(signalMetric, watermark) if attributesErr != nil { return fmt.Errorf("error while preparing pubsub message attributes: %w", attributesErr) } @@ -272,7 +391,7 @@ func (ex *pubsubExporter) consumeLogs(ctx context.Context, logs plog.Logs) error func (ex *pubsubExporter) publishLogs(ctx context.Context, logs plog.Logs, orderingKey string) error { watermark := ex.logsWatermarkFunc(logs, time.Now(), ex.config.Watermark.AllowedDrift).UTC() - attributes, attributesErr := ex.getMessageAttributes(otlpProtoLog, watermark) + attributes, attributesErr := ex.getMessageAttributes(signalLog, watermark) if attributesErr != nil { return fmt.Errorf("error while preparing pubsub message attributes: %w", attributesErr) } diff --git a/exporter/googlecloudpubsubexporter/exporter_test.go b/exporter/googlecloudpubsubexporter/exporter_test.go index 5badc12a92d77..63597a779c44c 100644 --- a/exporter/googlecloudpubsubexporter/exporter_test.go +++ b/exporter/googlecloudpubsubexporter/exporter_test.go @@ -14,6 +14,7 @@ import ( "github.com/googleapis/gax-go/v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + 
"go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/exporter/exportertest" "go.opentelemetry.io/collector/pdata/plog" @@ -35,7 +36,7 @@ func TestGetMessageAttributes(t *testing.T) { t.Run("logs", func(t *testing.T) { exporter, _ := newTestExporter(t) - gotAttributes, err := exporter.getMessageAttributes(otlpProtoLog, date) + gotAttributes, err := exporter.getMessageAttributes(signalLog, date) require.NoError(t, err) expectedAttributes := map[string]string{ @@ -52,7 +53,7 @@ func TestGetMessageAttributes(t *testing.T) { t.Run("metrics", func(t *testing.T) { exporter, _ := newTestExporter(t) - gotAttributes, err := exporter.getMessageAttributes(otlpProtoMetric, date) + gotAttributes, err := exporter.getMessageAttributes(signalMetric, date) require.NoError(t, err) expectedAttributes := map[string]string{ @@ -69,7 +70,7 @@ func TestGetMessageAttributes(t *testing.T) { t.Run("traces", func(t *testing.T) { exporter, _ := newTestExporter(t) - gotAttributes, err := exporter.getMessageAttributes(otlpProtoTrace, date) + gotAttributes, err := exporter.getMessageAttributes(signalTrace, date) require.NoError(t, err) expectedAttributes := map[string]string{ @@ -88,7 +89,7 @@ func TestGetMessageAttributes(t *testing.T) { cfg.Compression = "gzip" }) - gotAttributes, err := exporter.getMessageAttributes(otlpProtoLog, date) + gotAttributes, err := exporter.getMessageAttributes(signalLog, date) require.NoError(t, err) expectedAttributes := map[string]string{ @@ -102,6 +103,161 @@ func TestGetMessageAttributes(t *testing.T) { } assert.Equal(t, expectedAttributes, gotAttributes) }) + + t.Run("logs with extension", func(t *testing.T) { + exporter, _ := newTestExporter(t, func(cfg *Config) { + cfg.LogsSignalConfig = SignalConfig{ + Encoding: component.NewID(component.MustNewType("all_mock_encoding")), + Attributes: nil, + } + }) + + gotAttributes, err := exporter.getMessageAttributes(signalLog, date) + 
require.NoError(t, err) + + expectedAttributes := map[string]string{} + assert.Equal(t, expectedAttributes, gotAttributes) + }) + + t.Run("metrics with extension", func(t *testing.T) { + exporter, _ := newTestExporter(t, func(cfg *Config) { + cfg.MetricsSignalConfig = SignalConfig{ + Encoding: component.NewID(component.MustNewType("all_mock_encoding")), + Attributes: nil, + } + }) + + gotAttributes, err := exporter.getMessageAttributes(signalMetric, date) + require.NoError(t, err) + + expectedAttributes := map[string]string{} + assert.Equal(t, expectedAttributes, gotAttributes) + }) + + t.Run("traces with extension", func(t *testing.T) { + exporter, _ := newTestExporter(t, func(cfg *Config) { + cfg.TracesSignalConfig = SignalConfig{ + Encoding: component.NewID(component.MustNewType("all_mock_encoding")), + Attributes: nil, + } + }) + + gotAttributes, err := exporter.getMessageAttributes(signalTrace, date) + require.NoError(t, err) + + expectedAttributes := map[string]string{} + assert.Equal(t, expectedAttributes, gotAttributes) + }) + + t.Run("logs with extension and ce-type", func(t *testing.T) { + exporter, _ := newTestExporter(t, func(cfg *Config) { + cfg.LogsSignalConfig = SignalConfig{ + Encoding: component.NewID(component.MustNewType("all_mock_encoding")), + Attributes: map[string]string{ + "ce-type": "org.opentelemetry.mock.logs.v1", + }, + } + }) + + gotAttributes, err := exporter.getMessageAttributes(signalLog, date) + require.NoError(t, err) + + expectedAttributes := map[string]string{ + "ce-id": "00000000-0000-0000-0000-000000000000", + "ce-source": "/opentelemetry/collector/googlecloudpubsub/latest", + "ce-specversion": "1.0", + "ce-time": "2021-01-01T02:03:04.000000005Z", + "ce-type": "org.opentelemetry.mock.logs.v1", + } + assert.Equal(t, expectedAttributes, gotAttributes) + }) + + t.Run("logs with extension and ce-type+attribute", func(t *testing.T) { + exporter, _ := newTestExporter(t, func(cfg *Config) { + cfg.LogsSignalConfig = SignalConfig{ + 
Encoding: component.NewID(component.MustNewType("all_mock_encoding")), + Attributes: map[string]string{ + "ce-type": "org.opentelemetry.mock.logs.v1", + "foo": "bar", + }, + } + }) + + gotAttributes, err := exporter.getMessageAttributes(signalLog, date) + require.NoError(t, err) + + expectedAttributes := map[string]string{ + "ce-id": "00000000-0000-0000-0000-000000000000", + "ce-source": "/opentelemetry/collector/googlecloudpubsub/latest", + "ce-specversion": "1.0", + "ce-time": "2021-01-01T02:03:04.000000005Z", + "ce-type": "org.opentelemetry.mock.logs.v1", + "foo": "bar", + } + assert.Equal(t, expectedAttributes, gotAttributes) + }) + + t.Run("logs with extension and attribute", func(t *testing.T) { + exporter, _ := newTestExporter(t, func(cfg *Config) { + cfg.LogsSignalConfig = SignalConfig{ + Encoding: component.NewID(component.MustNewType("all_mock_encoding")), + Attributes: map[string]string{ + "foo": "bar", + }, + } + }) + + gotAttributes, err := exporter.getMessageAttributes(signalLog, date) + require.NoError(t, err) + + expectedAttributes := map[string]string{ + "foo": "bar", + } + assert.Equal(t, expectedAttributes, gotAttributes) + }) + + t.Run("metrics with extension and attribute", func(t *testing.T) { + exporter, _ := newTestExporter(t, func(cfg *Config) { + cfg.MetricsSignalConfig = SignalConfig{ + Encoding: component.NewID(component.MustNewType("all_mock_encoding")), + Attributes: map[string]string{ + "foo": "bar", + }, + } + }) + + gotAttributes, err := exporter.getMessageAttributes(signalMetric, date) + require.NoError(t, err) + + expectedAttributes := map[string]string{ + "foo": "bar", + } + assert.Equal(t, expectedAttributes, gotAttributes) + }) + + t.Run("traces with attribute", func(t *testing.T) { + exporter, _ := newTestExporter(t, func(cfg *Config) { + cfg.TracesSignalConfig = SignalConfig{ + Attributes: map[string]string{ + "foo": "bar", + }, + } + }) + + gotAttributes, err := exporter.getMessageAttributes(signalTrace, date) + 
require.NoError(t, err) + + expectedAttributes := map[string]string{ + "ce-id": "00000000-0000-0000-0000-000000000000", + "ce-source": "/opentelemetry/collector/googlecloudpubsub/latest", + "ce-specversion": "1.0", + "ce-time": "2021-01-01T02:03:04.000000005Z", + "ce-type": "org.opentelemetry.otlp.traces.v1", + "content-type": "application/protobuf", + "foo": "bar", + } + assert.Equal(t, expectedAttributes, gotAttributes) + }) } func TestExporterNoData(t *testing.T) { @@ -131,6 +287,97 @@ func TestExporterClientError(t *testing.T) { require.Error(t, exporter.start(t.Context(), componenttest.NewNopHost())) } +func TestExporterStartError(t *testing.T) { + testCases := []struct { + name string + config func(*Config) + expected string + }{ + { + name: "logs with non existing encoding", + config: func(cfg *Config) { + cfg.LogsSignalConfig = SignalConfig{ + Encoding: component.MustNewID("non_existing_encoding"), + } + }, + expected: "cannot start receiver: extension \"non_existing_encoding\" not found for logs", + }, + { + name: "metrics with non existing encoding", + config: func(cfg *Config) { + cfg.MetricsSignalConfig = SignalConfig{ + Encoding: component.MustNewID("non_existing_encoding"), + } + }, + expected: "cannot start receiver: extension \"non_existing_encoding\" not found for metrics", + }, + { + name: "traces with non existing encoding", + config: func(cfg *Config) { + cfg.TracesSignalConfig = SignalConfig{ + Encoding: component.MustNewID("non_existing_encoding"), + } + }, + expected: "cannot start receiver: extension \"non_existing_encoding\" not found for traces", + }, + { + name: "logs with incompatible encoding", + config: func(cfg *Config) { + cfg.LogsSignalConfig = SignalConfig{ + Encoding: component.MustNewID("metric_mock_encoding"), + } + }, + expected: "cannot start receiver: extension \"metric_mock_encoding\" is not a log unmarshaler", + }, + { + name: "metrics with incompatible encoding", + config: func(cfg *Config) { + cfg.MetricsSignalConfig = 
SignalConfig{ + Encoding: component.MustNewID("trace_mock_encoding"), + } + }, + expected: "cannot start receiver: extension \"trace_mock_encoding\" is not a metric unmarshaler", + }, + { + name: "traces with incompatible encoding", + config: func(cfg *Config) { + cfg.TracesSignalConfig = SignalConfig{ + Encoding: component.MustNewID("log_mock_encoding"), + } + }, + expected: "cannot start receiver: extension \"log_mock_encoding\" is not a trace unmarshaler", + }, + } + + // Iterate over the test cases + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + t.Helper() + + factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + cfg.ProjectID = defaultProjectID + cfg.Topic = defaultTopic + tc.config(cfg) + + require.NoError(t, cfg.Validate()) + + exporter := ensureExporter(exportertest.NewNopSettings(metadata.Type), cfg) + publisher := &mockPublisher{} + exporter.makeClient = func(context.Context, *Config, string) (publisherClient, error) { + return publisher, nil + } + exporter.makeUUID = func() (uuid.UUID, error) { + return uuid.Parse(defaultUUID) + } + + err := exporter.start(t.Context(), &mockHost{}) + assert.Equal(t, tc.expected, err.Error()) + t.Cleanup(func() { assert.NoError(t, exporter.shutdown(t.Context())) }) + }) + } +} + func TestExporterSimpleData(t *testing.T) { t.Run("logs", func(t *testing.T) { exporter, publisher := newTestExporter(t) @@ -457,7 +704,7 @@ func newTestExporter(t *testing.T, options ...func(*Config)) (*pubsubExporter, * return uuid.Parse(defaultUUID) } - require.NoError(t, exporter.start(t.Context(), componenttest.NewNopHost())) + require.NoError(t, exporter.start(t.Context(), &mockHost{})) t.Cleanup(func() { assert.NoError(t, exporter.shutdown(t.Context())) }) return exporter, publisher @@ -475,3 +722,78 @@ func (m *mockPublisher) Publish(_ context.Context, request *pb.PublishRequest, _ func (*mockPublisher) Close() error { return nil } + +type mockExtension struct{} + +func (mockExtension) Start(_ 
context.Context, _ component.Host) error { + return nil +} + +func (mockExtension) Shutdown(_ context.Context) error { + return nil +} + +func (mockExtension) MarshalLogs(_ plog.Logs) ([]byte, error) { + return []byte{}, nil +} + +func (mockExtension) MarshalMetrics(_ pmetric.Metrics) ([]byte, error) { + return []byte{}, nil +} + +func (mockExtension) MarshalTraces(_ ptrace.Traces) ([]byte, error) { + return []byte{}, nil +} + +type mockTraceExtension struct{} + +func (mockTraceExtension) Start(_ context.Context, _ component.Host) error { + return nil +} + +func (mockTraceExtension) Shutdown(_ context.Context) error { + return nil +} + +func (mockTraceExtension) MarshalTraces(_ ptrace.Traces) ([]byte, error) { + return []byte{0xf0}, nil +} + +type mockMetricExtension struct{} + +func (mockMetricExtension) Start(_ context.Context, _ component.Host) error { + return nil +} + +func (mockMetricExtension) Shutdown(_ context.Context) error { + return nil +} + +func (mockMetricExtension) MarshalMetrics(_ pmetric.Metrics) ([]byte, error) { + return []byte{0xf0}, nil +} + +type mockLogExtension struct{} + +func (mockLogExtension) Start(_ context.Context, _ component.Host) error { + return nil +} + +func (mockLogExtension) Shutdown(_ context.Context) error { + return nil +} + +func (mockLogExtension) MarshalLogs(_ plog.Logs) ([]byte, error) { + return []byte{0xf0}, nil +} + +type mockHost struct{} + +func (mockHost) GetExtensions() map[component.ID]component.Component { + ext := make(map[component.ID]component.Component) + ext[component.MustNewID("all_mock_encoding")] = mockExtension{} + ext[component.MustNewID("trace_mock_encoding")] = mockTraceExtension{} + ext[component.MustNewID("metric_mock_encoding")] = mockMetricExtension{} + ext[component.MustNewID("log_mock_encoding")] = mockLogExtension{} + return ext +} diff --git a/exporter/googlecloudpubsubexporter/factory.go b/exporter/googlecloudpubsubexporter/factory.go index 16a2112e96802..dc50449e5c79b 100644 --- 
a/exporter/googlecloudpubsubexporter/factory.go +++ b/exporter/googlecloudpubsubexporter/factory.go @@ -45,15 +45,18 @@ func ensureExporter(params exporter.Settings, pCfg *Config) *pubsubExporter { return exp } exp = &pubsubExporter{ - logger: params.Logger, - userAgent: strings.ReplaceAll(pCfg.UserAgent, "{{version}}", params.BuildInfo.Version), - ceSource: fmt.Sprintf("/opentelemetry/collector/%s/%s", metadata.Type.String(), params.BuildInfo.Version), - config: pCfg, - tracesMarshaler: &ptrace.ProtoMarshaler{}, - metricsMarshaler: &pmetric.ProtoMarshaler{}, - logsMarshaler: &plog.ProtoMarshaler{}, - makeUUID: uuid.NewRandom, - makeClient: newPublisherClient, + logger: params.Logger, + userAgent: strings.ReplaceAll(pCfg.UserAgent, "{{version}}", params.BuildInfo.Version), + ceSource: fmt.Sprintf("/opentelemetry/collector/%s/%s", metadata.Type.String(), params.BuildInfo.Version), + config: pCfg, + tracesMarshaler: &ptrace.ProtoMarshaler{}, + tracesAttributes: map[string]string{}, + metricsMarshaler: &pmetric.ProtoMarshaler{}, + metricsAttributes: map[string]string{}, + logsMarshaler: &plog.ProtoMarshaler{}, + logsAttributes: map[string]string{}, + makeUUID: uuid.NewRandom, + makeClient: newPublisherClient, } // we ignore the error here as the config is already validated with the same method exp.ceCompression, _ = pCfg.parseCompression() diff --git a/exporter/googlecloudpubsubexporter/go.mod b/exporter/googlecloudpubsubexporter/go.mod index 7c6f20b469d2a..d85d80225f885 100644 --- a/exporter/googlecloudpubsubexporter/go.mod +++ b/exporter/googlecloudpubsubexporter/go.mod @@ -6,6 +6,7 @@ require ( cloud.google.com/go/pubsub/v2 v2.3.0 github.com/google/uuid v1.6.0 github.com/googleapis/gax-go/v2 v2.15.0 + github.com/open-telemetry/opentelemetry-collector-contrib/extension/encoding v0.140.1 github.com/stretchr/testify v1.11.1 go.opentelemetry.io/collector/component v1.46.1-0.20251120204106-2e9c82787618 go.opentelemetry.io/collector/component/componenttest 
v0.140.1-0.20251120204106-2e9c82787618 @@ -29,7 +30,7 @@ require ( cloud.google.com/go/compute/metadata v0.9.0 // indirect cloud.google.com/go/iam v1.5.2 // indirect github.com/cenkalti/backoff/v5 v5.0.3 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -46,7 +47,7 @@ require ( github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/collector/client v1.46.1-0.20251120204106-2e9c82787618 // indirect go.opentelemetry.io/collector/config/configoptional v1.46.1-0.20251120204106-2e9c82787618 // indirect @@ -91,3 +92,5 @@ retract ( v0.76.1 v0.65.0 ) + +replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/encoding => ../../extension/encoding diff --git a/exporter/googlecloudpubsubexporter/go.sum b/exporter/googlecloudpubsubexporter/go.sum index 3e0d9c4ed8445..fc5145976de34 100644 --- a/exporter/googlecloudpubsubexporter/go.sum +++ b/exporter/googlecloudpubsubexporter/go.sum @@ -13,8 +13,9 @@ github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F9 github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f h1:Y8xYupdHxryycyPlc9Y+bSQAYZnetRJ70VMVKm5CKI0= github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f/go.mod h1:HlzOvOjVBOfTGSRXRyY0OiCS/3J1akRGQQpRO/7zyF4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew 
v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329 h1:K+fnvUM0VZ7ZFJf0n4L/BRlnsb9pL/GuDG6FqaH+PwM= github.com/envoyproxy/go-control-plane/envoy v1.35.0 h1:ixjkELDE+ru6idPxcHLj8LBVc2bFP7iBytj353BoHUo= github.com/envoyproxy/go-control-plane/envoy v1.35.0/go.mod h1:09qwbGVuSWWAyN5t/b3iyVfz5+z8QWGrzkoqm/8SbEs= @@ -70,8 +71,9 @@ github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFd github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= From e3f0e54055ac30348ba27e339421bcd818fbc505 Mon Sep 17 00:00:00 2001 From: Curtis Robert Date: Thu, 27 Nov 2025 00:05:37 -0800 Subject: [PATCH 24/41] [exporter/carbon] Delete unmainted component (#44532) #### 
Description This component followed the required unmaintained process of deprecation and now it's time to remove it. This can be merged once https://github.com/open-telemetry/opentelemetry-collector-releases/pull/1279 goes in. #### Link to tracking issue Fixes #38913 --- .chloggen/config.yaml | 1 - .chloggen/remove_carbon_exporter.yaml | 27 ++ .codecov.yml | 5 +- .github/ALLOWLIST | 1 - .github/CODEOWNERS | 1 - .github/ISSUE_TEMPLATE/beta_stability.yaml | 1 - .github/ISSUE_TEMPLATE/bug_report.yaml | 1 - .github/ISSUE_TEMPLATE/feature_request.yaml | 1 - .github/ISSUE_TEMPLATE/other.yaml | 1 - .github/ISSUE_TEMPLATE/unmaintained.yaml | 1 - .github/component_labels.txt | 1 - cmd/opampsupervisor/go.mod | 2 - cmd/otelcontribcol/builder-config.yaml | 1 - cmd/oteltestbedcol/builder-config.yaml | 1 - exporter/carbonexporter/Makefile | 1 - exporter/carbonexporter/README.md | 52 --- exporter/carbonexporter/config.go | 53 --- exporter/carbonexporter/config_test.go | 133 ------- exporter/carbonexporter/doc.go | 7 - exporter/carbonexporter/exporter.go | 204 ---------- exporter/carbonexporter/exporter_test.go | 364 ------------------ exporter/carbonexporter/factory.go | 54 --- exporter/carbonexporter/factory_test.go | 51 --- .../generated_component_test.go | 152 -------- .../carbonexporter/generated_package_test.go | 13 - exporter/carbonexporter/go.mod | 88 ----- exporter/carbonexporter/go.sum | 152 -------- .../internal/metadata/generated_status.go | 16 - exporter/carbonexporter/metadata.yaml | 14 - .../carbonexporter/metricdata_to_plaintext.go | 338 ---------------- .../metricdata_to_plaintext_test.go | 333 ---------------- exporter/carbonexporter/testdata/config.yaml | 24 -- .../integrationtest/go.mod | 2 - internal/tidylist/tidylist.txt | 5 +- reports/distributions/contrib.yaml | 1 - testbed/datasenders/carbon.go | 75 ---- testbed/go.mod | 3 - testbed/stabilitytests/metric_test.go | 17 - testbed/tests/metric_test.go | 9 - versions.yaml | 1 - 40 files changed, 30 
insertions(+), 2177 deletions(-) create mode 100644 .chloggen/remove_carbon_exporter.yaml delete mode 100644 exporter/carbonexporter/Makefile delete mode 100644 exporter/carbonexporter/README.md delete mode 100644 exporter/carbonexporter/config.go delete mode 100644 exporter/carbonexporter/config_test.go delete mode 100644 exporter/carbonexporter/doc.go delete mode 100644 exporter/carbonexporter/exporter.go delete mode 100644 exporter/carbonexporter/exporter_test.go delete mode 100644 exporter/carbonexporter/factory.go delete mode 100644 exporter/carbonexporter/factory_test.go delete mode 100644 exporter/carbonexporter/generated_component_test.go delete mode 100644 exporter/carbonexporter/generated_package_test.go delete mode 100644 exporter/carbonexporter/go.mod delete mode 100644 exporter/carbonexporter/go.sum delete mode 100644 exporter/carbonexporter/internal/metadata/generated_status.go delete mode 100644 exporter/carbonexporter/metadata.yaml delete mode 100644 exporter/carbonexporter/metricdata_to_plaintext.go delete mode 100644 exporter/carbonexporter/metricdata_to_plaintext_test.go delete mode 100644 exporter/carbonexporter/testdata/config.yaml delete mode 100644 testbed/datasenders/carbon.go diff --git a/.chloggen/config.yaml b/.chloggen/config.yaml index dbbce3dba884b..a2368a6724ffd 100644 --- a/.chloggen/config.yaml +++ b/.chloggen/config.yaml @@ -39,7 +39,6 @@ components: - exporter/azuredataexplorer - exporter/azuremonitor - exporter/bmchelix - - exporter/carbon - exporter/cassandra - exporter/clickhouse - exporter/coralogix diff --git a/.chloggen/remove_carbon_exporter.yaml b/.chloggen/remove_carbon_exporter.yaml new file mode 100644 index 0000000000000..0579cf032c67a --- /dev/null +++ b/.chloggen/remove_carbon_exporter.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. 
+ +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. receiver/filelog) +component: cmd/otelcontribcol + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Removing unmaintained component `exporter/carbon` + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [38913] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/.codecov.yml b/.codecov.yml index 2cc218eb9a59a..4256124ea8563 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -25,6 +25,7 @@ ignore: # Use component management to get coverage for each component. # See https://docs.codecov.com/docs/components # This list is autogenerated by codecovgen. 
+# Run `make gencodecov` to update # Start autogenerated components list component_management: individual_components: @@ -144,10 +145,6 @@ component_management: name: exporter_bmchelix paths: - exporter/bmchelixexporter/** - - component_id: exporter_carbon - name: exporter_carbon - paths: - - exporter/carbonexporter/** - component_id: exporter_cassandra name: exporter_cassandra paths: diff --git a/.github/ALLOWLIST b/.github/ALLOWLIST index 738871638c944..65cd0761183d7 100644 --- a/.github/ALLOWLIST +++ b/.github/ALLOWLIST @@ -29,7 +29,6 @@ internal/common # Start unmaintained components list -exporter/carbonexporter receiver/bigipreceiver receiver/carbonreceiver diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 87430b74d4929..ad0c0f8cf9e29 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -384,7 +384,6 @@ reports/distributions/otlp.yaml @open-telemetry/collector-contrib-approvers # Start unmaintained components list -exporter/carbonexporter/ @open-telemetry/collector-contrib-approvers receiver/bigipreceiver/ @open-telemetry/collector-contrib-approvers receiver/carbonreceiver/ @open-telemetry/collector-contrib-approvers diff --git a/.github/ISSUE_TEMPLATE/beta_stability.yaml b/.github/ISSUE_TEMPLATE/beta_stability.yaml index 0ddfc92670899..e707521fff669 100644 --- a/.github/ISSUE_TEMPLATE/beta_stability.yaml +++ b/.github/ISSUE_TEMPLATE/beta_stability.yaml @@ -50,7 +50,6 @@ body: - exporter/azuredataexplorer - exporter/azuremonitor - exporter/bmchelix - - exporter/carbon - exporter/cassandra - exporter/clickhouse - exporter/coralogix diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml index be21bc819a98f..fc474c9caa68d 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yaml +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -53,7 +53,6 @@ body: - exporter/azuredataexplorer - exporter/azuremonitor - exporter/bmchelix - - exporter/carbon - exporter/cassandra - exporter/clickhouse - exporter/coralogix diff --git 
a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml index 0ef7ed39f673a..ca0d36a65fe0c 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yaml +++ b/.github/ISSUE_TEMPLATE/feature_request.yaml @@ -47,7 +47,6 @@ body: - exporter/azuredataexplorer - exporter/azuremonitor - exporter/bmchelix - - exporter/carbon - exporter/cassandra - exporter/clickhouse - exporter/coralogix diff --git a/.github/ISSUE_TEMPLATE/other.yaml b/.github/ISSUE_TEMPLATE/other.yaml index 673942a7b5fb1..f51eab8daeb6b 100644 --- a/.github/ISSUE_TEMPLATE/other.yaml +++ b/.github/ISSUE_TEMPLATE/other.yaml @@ -47,7 +47,6 @@ body: - exporter/azuredataexplorer - exporter/azuremonitor - exporter/bmchelix - - exporter/carbon - exporter/cassandra - exporter/clickhouse - exporter/coralogix diff --git a/.github/ISSUE_TEMPLATE/unmaintained.yaml b/.github/ISSUE_TEMPLATE/unmaintained.yaml index c3b177bf90471..d8931a5affc6d 100644 --- a/.github/ISSUE_TEMPLATE/unmaintained.yaml +++ b/.github/ISSUE_TEMPLATE/unmaintained.yaml @@ -52,7 +52,6 @@ body: - exporter/azuredataexplorer - exporter/azuremonitor - exporter/bmchelix - - exporter/carbon - exporter/cassandra - exporter/clickhouse - exporter/coralogix diff --git a/.github/component_labels.txt b/.github/component_labels.txt index 268af43dfbd5b..fd7052e8ab29a 100644 --- a/.github/component_labels.txt +++ b/.github/component_labels.txt @@ -343,6 +343,5 @@ reports/distributions/core.yaml reports/distributions/core.yaml reports/distributions/contrib.yaml reports/distributions/contrib.yaml reports/distributions/k8s.yaml reports/distributions/k8s.yaml reports/distributions/otlp.yaml reports/distributions/otlp.yaml -exporter/carbonexporter exporter/carbon receiver/bigipreceiver receiver/bigip receiver/carbonreceiver receiver/carbon diff --git a/cmd/opampsupervisor/go.mod b/cmd/opampsupervisor/go.mod index 0fad555a3e2a3..03588f6edb4ea 100644 --- a/cmd/opampsupervisor/go.mod +++ b/cmd/opampsupervisor/go.mod @@ -283,8 +283,6 @@ 
replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experiment replace github.com/open-telemetry/opentelemetry-collector-contrib/testbed/mockdatasenders/mockdatadogagentexporter => ../../testbed/mockdatasenders/mockdatadogagentexporter -replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter => ../../exporter/carbonexporter - replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry => ../../pkg/resourcetotelemetry replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver => ../../receiver/zipkinreceiver diff --git a/cmd/otelcontribcol/builder-config.yaml b/cmd/otelcontribcol/builder-config.yaml index 2deb5144863df..4403408a166c5 100644 --- a/cmd/otelcontribcol/builder-config.yaml +++ b/cmd/otelcontribcol/builder-config.yaml @@ -75,7 +75,6 @@ exporters: - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/azureblobexporter v0.140.1 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/azuremonitorexporter v0.140.1 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/bmchelixexporter v0.140.1 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter v0.140.1 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/clickhouseexporter v0.140.1 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/cassandraexporter v0.140.1 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/coralogixexporter v0.140.1 diff --git a/cmd/oteltestbedcol/builder-config.yaml b/cmd/oteltestbedcol/builder-config.yaml index 5c57193572e98..49cf0d2a14981 100644 --- a/cmd/oteltestbedcol/builder-config.yaml +++ b/cmd/oteltestbedcol/builder-config.yaml @@ -19,7 +19,6 @@ exporters: - gomod: go.opentelemetry.io/collector/exporter/debugexporter v0.140.1-0.20251120204106-2e9c82787618 - gomod: 
go.opentelemetry.io/collector/exporter/otlpexporter v0.140.1-0.20251120204106-2e9c82787618 - gomod: go.opentelemetry.io/collector/exporter/otlphttpexporter v0.140.1-0.20251120204106-2e9c82787618 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter v0.140.1 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/opensearchexporter v0.140.1 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/otelarrowexporter v0.140.1 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.140.1 diff --git a/exporter/carbonexporter/Makefile b/exporter/carbonexporter/Makefile deleted file mode 100644 index ded7a36092dc3..0000000000000 --- a/exporter/carbonexporter/Makefile +++ /dev/null @@ -1 +0,0 @@ -include ../../Makefile.Common diff --git a/exporter/carbonexporter/README.md b/exporter/carbonexporter/README.md deleted file mode 100644 index ffbfe951fb279..0000000000000 --- a/exporter/carbonexporter/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# Carbon Exporter - - -| Status | | -| ------------- |-----------| -| Stability | [unmaintained]: metrics | -| Distributions | [contrib] | -| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fcarbon%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fcarbon) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fcarbon%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fcarbon) | -| Code coverage | 
[![codecov](https://codecov.io/github/open-telemetry/opentelemetry-collector-contrib/graph/main/badge.svg?component=exporter_carbon)](https://app.codecov.io/gh/open-telemetry/opentelemetry-collector-contrib/tree/main/?components%5B0%5D=exporter_carbon&displayType=list) | -| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | \| Seeking more code owners! | -| Emeritus | [@aboguszewski-sumo](https://www.github.com/aboguszewski-sumo) | - -[unmaintained]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#unmaintained -[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib - - -The [Carbon](https://github.com/graphite-project/carbon) exporter supports -Carbon's [plaintext -protocol](https://graphite.readthedocs.io/en/stable/feeding-carbon.html#the-plaintext-protocol). - -## Configuration - -The following settings are required: - -- `endpoint` (default = `localhost:2003`): Address and port that the - exporter should send data to. - -Example: - -```yaml -exporters: - carbon: - # by default it will export to localhost:2003 using tcp - carbon/allsettings: - # use endpoint to specify alternative destinations for the exporter, - # the default is localhost:2003 - endpoint: localhost:8080 - # timeout is the maximum duration allowed to connecting and sending the - # data to the configured endpoint. - # The default is 5 seconds. - timeout: 10s -``` - -The full list of settings exposed for this receiver are documented in [config.go](./config.go) -with detailed sample configurations in [testdata/config.yaml](./testdata/config.yaml). 
- -## Advanced Configuration - -Several helper files are leveraged to provide additional capabilities automatically: - -- [net settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/confignet/README.md) -- [Queuing, retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md) diff --git a/exporter/carbonexporter/config.go b/exporter/carbonexporter/config.go deleted file mode 100644 index 9123d7cf0d1fb..0000000000000 --- a/exporter/carbonexporter/config.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package carbonexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter" - -import ( - "errors" - "fmt" - "net" - - "go.opentelemetry.io/collector/config/confignet" - "go.opentelemetry.io/collector/config/configretry" - "go.opentelemetry.io/collector/exporter/exporterhelper" - - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry" -) - -// Config defines configuration for Carbon exporter. -type Config struct { - // Specifies the connection endpoint config. The default value is "localhost:2003". - confignet.TCPAddrConfig `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. - // MaxIdleConns is used to set a limit to the maximum idle TCP connections the client can keep open. Default value is 100. - // If `sending_queue` is enabled, it is recommended to use same value as `sending_queue::num_consumers`. - MaxIdleConns int `mapstructure:"max_idle_conns"` - - // Timeout is the maximum duration allowed to connecting and sending the - // data to the Carbon/Graphite backend. The default value is 5s. - TimeoutSettings exporterhelper.TimeoutConfig `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. 
- QueueConfig exporterhelper.QueueBatchConfig `mapstructure:"sending_queue"` - RetryConfig configretry.BackOffConfig `mapstructure:"retry_on_failure"` - - // ResourceToTelemetrySettings defines configuration for converting resource attributes to metric labels. - ResourceToTelemetryConfig resourcetotelemetry.Settings `mapstructure:"resource_to_telemetry_conversion"` -} - -func (cfg *Config) Validate() error { - // Resolve TCP address just to ensure that it is a valid one. It is better - // to fail here than at when the exporter is started. - if _, err := net.ResolveTCPAddr("tcp", cfg.Endpoint); err != nil { - return fmt.Errorf("exporter has an invalid TCP endpoint: %w", err) - } - - // Negative timeouts are not acceptable, since all sends will fail. - if cfg.TimeoutSettings.Timeout < 0 { - return errors.New("'timeout' must be non-negative") - } - - if cfg.MaxIdleConns < 0 { - return errors.New("'max_idle_conns' must be non-negative") - } - - return nil -} diff --git a/exporter/carbonexporter/config_test.go b/exporter/carbonexporter/config_test.go deleted file mode 100644 index aeef0b2ab031c..0000000000000 --- a/exporter/carbonexporter/config_test.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package carbonexporter - -import ( - "path/filepath" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/config/confignet" - "go.opentelemetry.io/collector/config/configretry" - "go.opentelemetry.io/collector/confmap/confmaptest" - "go.opentelemetry.io/collector/confmap/xconfmap" - "go.opentelemetry.io/collector/exporter/exporterhelper" - - "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter/internal/metadata" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry" -) - -func TestLoadConfig(t *testing.T) { - t.Parallel() - 
- cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) - require.NoError(t, err) - - tests := []struct { - id component.ID - expected component.Config - errorMessage string - }{ - { - id: component.NewIDWithName(metadata.Type, ""), - expected: createDefaultConfig(), - }, - { - id: component.NewIDWithName(metadata.Type, "allsettings"), - expected: &Config{ - TCPAddrConfig: confignet.TCPAddrConfig{ - Endpoint: "localhost:8080", - }, - MaxIdleConns: 15, - TimeoutSettings: exporterhelper.TimeoutConfig{ - Timeout: 10 * time.Second, - }, - RetryConfig: configretry.BackOffConfig{ - Enabled: true, - InitialInterval: 10 * time.Second, - RandomizationFactor: 0.7, - Multiplier: 3.14, - MaxInterval: 1 * time.Minute, - MaxElapsedTime: 10 * time.Minute, - }, - QueueConfig: func() exporterhelper.QueueBatchConfig { - queue := exporterhelper.NewDefaultQueueConfig() - queue.Enabled = true - queue.NumConsumers = 2 - queue.QueueSize = 10 - return queue - }(), - ResourceToTelemetryConfig: resourcetotelemetry.Settings{ - Enabled: true, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.id.String(), func(t *testing.T) { - factory := NewFactory() - cfg := factory.CreateDefaultConfig() - - sub, err := cm.Sub(tt.id.String()) - require.NoError(t, err) - require.NoError(t, sub.Unmarshal(cfg)) - - assert.NoError(t, xconfmap.Validate(cfg)) - assert.Equal(t, tt.expected, cfg) - }) - } -} - -func TestValidateConfig(t *testing.T) { - tests := []struct { - name string - config *Config - wantErr bool - }{ - { - name: "default_config", - config: createDefaultConfig().(*Config), - }, - { - name: "invalid_tcp_addr", - config: &Config{ - TCPAddrConfig: confignet.TCPAddrConfig{ - Endpoint: "http://localhost:2003", - }, - }, - wantErr: true, - }, - { - name: "invalid_timeout", - config: &Config{ - TCPAddrConfig: confignet.TCPAddrConfig{Endpoint: defaultEndpoint}, - TimeoutSettings: exporterhelper.TimeoutConfig{ - Timeout: -5 * time.Second, - }, - }, - wantErr: true, - }, - { 
- name: "invalid_max_idle_conns", - config: &Config{ - TCPAddrConfig: confignet.TCPAddrConfig{Endpoint: defaultEndpoint}, - MaxIdleConns: -1, - }, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if tt.wantErr { - assert.Error(t, tt.config.Validate()) - } else { - assert.NoError(t, tt.config.Validate()) - } - }) - } -} diff --git a/exporter/carbonexporter/doc.go b/exporter/carbonexporter/doc.go deleted file mode 100644 index db743358016ff..0000000000000 --- a/exporter/carbonexporter/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -//go:generate mdatagen metadata.yaml - -// Package carbonexporter implements an exporter that sends data to Carbon. -package carbonexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter" diff --git a/exporter/carbonexporter/exporter.go b/exporter/carbonexporter/exporter.go deleted file mode 100644 index 1680c2c8451c0..0000000000000 --- a/exporter/carbonexporter/exporter.go +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package carbonexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter" - -import ( - "context" - "net" - "sync" - "time" - - "go.opentelemetry.io/collector/config/confignet" - "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/pdata/pmetric" - "go.uber.org/multierr" - - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry" -) - -// newCarbonExporter returns a new Carbon exporter. 
-func newCarbonExporter(ctx context.Context, cfg *Config, set exporter.Settings) (exporter.Metrics, error) { - sender := carbonSender{ - writeTimeout: cfg.TimeoutSettings.Timeout, - conns: newConnPool(cfg.TCPAddrConfig, cfg.TimeoutSettings.Timeout, cfg.MaxIdleConns), - } - - exp, err := exporterhelper.NewMetrics( - ctx, - set, - cfg, - sender.pushMetricsData, - // We don't use exporterhelper.WithTimeout because the TCP connection does not accept writing with context. - exporterhelper.WithQueue(cfg.QueueConfig), - exporterhelper.WithRetry(cfg.RetryConfig), - exporterhelper.WithShutdown(sender.Shutdown)) - if err != nil { - return nil, err - } - - return resourcetotelemetry.WrapMetricsExporter(cfg.ResourceToTelemetryConfig, exp), nil -} - -// carbonSender is the struct tying the translation function and the TCP -// connections into an implementations of exporterhelper.PushMetricsData so -// the exporter can leverage the helper and get consistent observability. -type carbonSender struct { - writeTimeout time.Duration - conns connPool -} - -func (cs *carbonSender) pushMetricsData(_ context.Context, md pmetric.Metrics) error { - lines := metricDataToPlaintext(md) - - // There is no way to do a call equivalent to recvfrom with an empty buffer - // to check if the connection was terminated (if the size of the buffer is - // 0 the Read call doesn't call lower level). So due to buffer sizes it is - // possible that a write will succeed on a connection that was already - // closed by the server. - // - // At least on Darwin it is possible to work around this by configuring the - // buffer on each call, ie.: - // - // if err = conn.SetWriteBuffer(len(bytes)-1); err != nil { - // return 0, err - // } - // - // However, this causes a performance penalty of ~10% cpu and it is not - // present in various implementations of Carbon clients. Considering these - // facts this "workaround" is not being added at this moment. 
If it is - // needed in some scenarios the workaround should be validated on other - // platforms and offered as a configuration setting. - conn, err := cs.conns.get() - if err != nil { - return err - } - - if err = conn.SetWriteDeadline(time.Now().Add(cs.writeTimeout)); err != nil { - // Do not re-enqueue the connection since it failed to set a deadline. - return multierr.Append(err, conn.Close()) - } - - // If we did not write all bytes will get an error, so no need to check for that. - _, err = conn.Write([]byte(lines)) - if err != nil { - // Do not re-enqueue the connection since it failed to write. - return multierr.Append(err, conn.Close()) - } - - // Even if we close the connection because of the max idle connections, - cs.conns.put(conn) - return nil -} - -func (cs *carbonSender) Shutdown(context.Context) error { - return cs.conns.close() -} - -// connPool is a very simple implementation of a pool of net.Conn instances. -type connPool interface { - get() (net.Conn, error) - put(conn net.Conn) - close() error -} - -func newConnPool( - tcpConfig confignet.TCPAddrConfig, - timeout time.Duration, - maxIdleConns int, -) connPool { - if maxIdleConns == 0 { - return &nopConnPool{ - timeout: timeout, - tcpConfig: tcpConfig, - } - } - return &connPoolWithIdle{ - timeout: timeout, - tcpConfig: tcpConfig, - maxIdleConns: maxIdleConns, - } -} - -// nopConnPool is a very simple implementation that does not cache any net.Conn. -type nopConnPool struct { - timeout time.Duration - tcpConfig confignet.TCPAddrConfig -} - -func (cp *nopConnPool) get() (net.Conn, error) { - return createTCPConn(cp.tcpConfig, cp.timeout) -} - -func (*nopConnPool) put(conn net.Conn) { - _ = conn.Close() -} - -func (*nopConnPool) close() error { - return nil -} - -// connPool is a very simple implementation of a pool of net.Conn instances. -// -// It keeps at most maxIdleConns net.Conn and always "popping" the most -// recently returned to the pool. 
There is no accounting to terminating old -// unused connections. -type connPoolWithIdle struct { - timeout time.Duration - maxIdleConns int - mtx sync.Mutex - conns []net.Conn - tcpConfig confignet.TCPAddrConfig -} - -func (cp *connPoolWithIdle) get() (net.Conn, error) { - if conn := cp.getFromCache(); conn != nil { - return conn, nil - } - - return createTCPConn(cp.tcpConfig, cp.timeout) -} - -func (cp *connPoolWithIdle) put(conn net.Conn) { - cp.mtx.Lock() - defer cp.mtx.Unlock() - // Do not cache if above limit. - if len(cp.conns) > cp.maxIdleConns { - _ = conn.Close() - return - } - cp.conns = append(cp.conns, conn) -} - -func (cp *connPoolWithIdle) getFromCache() net.Conn { - cp.mtx.Lock() - defer cp.mtx.Unlock() - lastIdx := len(cp.conns) - 1 - if lastIdx < 0 { - return nil - } - conn := cp.conns[lastIdx] - cp.conns = cp.conns[0:lastIdx] - return conn -} - -func (cp *connPoolWithIdle) close() error { - cp.mtx.Lock() - defer cp.mtx.Unlock() - - var errs error - for _, conn := range cp.conns { - errs = multierr.Append(errs, conn.Close()) - } - cp.conns = nil - return errs -} - -func createTCPConn(tcpConfig confignet.TCPAddrConfig, timeout time.Duration) (net.Conn, error) { - c, err := net.DialTimeout("tcp", tcpConfig.Endpoint, timeout) - if err != nil { - return nil, err - } - return c, err -} diff --git a/exporter/carbonexporter/exporter_test.go b/exporter/carbonexporter/exporter_test.go deleted file mode 100644 index 4797c15576641..0000000000000 --- a/exporter/carbonexporter/exporter_test.go +++ /dev/null @@ -1,364 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package carbonexporter - -import ( - "bufio" - "errors" - "io" - "net" - "runtime" - "strconv" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/config/confignet" - 
"go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/exporter/exportertest" - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/pdata/pmetric" - conventions "go.opentelemetry.io/otel/semconv/v1.27.0" - - "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter/internal/metadata" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry" -) - -func TestNewWithDefaultConfig(t *testing.T) { - cfg := createDefaultConfig().(*Config) - got, err := newCarbonExporter(t.Context(), cfg, exportertest.NewNopSettings(metadata.Type)) - assert.NotNil(t, got) - assert.NoError(t, err) -} - -func TestConsumeMetricsNoServer(t *testing.T) { - exp, err := newCarbonExporter( - t.Context(), - &Config{ - TCPAddrConfig: confignet.TCPAddrConfig{Endpoint: testutil.GetAvailableLocalAddress(t)}, - TimeoutSettings: exporterhelper.TimeoutConfig{Timeout: 5 * time.Second}, - }, - exportertest.NewNopSettings(metadata.Type)) - require.NoError(t, err) - require.NoError(t, exp.Start(t.Context(), componenttest.NewNopHost())) - require.Error(t, exp.ConsumeMetrics(t.Context(), generateSmallBatch())) - require.NoError(t, exp.Shutdown(t.Context())) -} - -func TestConsumeMetricsWithResourceToTelemetry(t *testing.T) { - addr := testutil.GetAvailableLocalAddress(t) - cs := newCarbonServer(t, addr, "test_0;key_0=value_0;key_1=value_1;key_2=value_2;service.name=carbon 0") - // Each metric point will generate one Carbon line, set up the wait - // for all of them. 
- cs.start(t, 1) - - exp, err := newCarbonExporter( - t.Context(), - &Config{ - TCPAddrConfig: confignet.TCPAddrConfig{Endpoint: addr}, - TimeoutSettings: exporterhelper.TimeoutConfig{Timeout: 5 * time.Second}, - ResourceToTelemetryConfig: resourcetotelemetry.Settings{Enabled: true}, - }, - exportertest.NewNopSettings(metadata.Type)) - require.NoError(t, err) - require.NoError(t, exp.Start(t.Context(), componenttest.NewNopHost())) - require.NoError(t, exp.ConsumeMetrics(t.Context(), generateSmallBatch())) - assert.NoError(t, exp.Shutdown(t.Context())) - cs.shutdownAndVerify(t) -} - -func TestConsumeMetrics(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("skipping test on windows, see https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/10147") - } - - tests := []struct { - name string - md pmetric.Metrics - numProducers int - writesPerProducer int - }{ - { - name: "small_batch", - md: generateSmallBatch(), - numProducers: 1, - writesPerProducer: 5, - }, - { - name: "large_batch", - md: generateLargeBatch(), - numProducers: 1, - writesPerProducer: 5, - }, - { - name: "concurrent_small_batch", - md: generateSmallBatch(), - numProducers: 5, - writesPerProducer: 5, - }, - { - name: "concurrent_large_batch", - md: generateLargeBatch(), - numProducers: 5, - writesPerProducer: 5, - }, - { - name: "high_concurrency", - md: generateLargeBatch(), - numProducers: 10, - writesPerProducer: 200, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - addr := testutil.GetAvailableLocalAddress(t) - cs := newCarbonServer(t, addr, "") - // Each metric point will generate one Carbon line, set up the wait - // for all of them. 
- cs.start(t, tt.numProducers*tt.writesPerProducer*tt.md.DataPointCount()) - - exp, err := newCarbonExporter( - t.Context(), - &Config{ - TCPAddrConfig: confignet.TCPAddrConfig{Endpoint: addr}, - MaxIdleConns: tt.numProducers, - TimeoutSettings: exporterhelper.TimeoutConfig{Timeout: 5 * time.Second}, - }, - exportertest.NewNopSettings(metadata.Type)) - require.NoError(t, err) - require.NoError(t, exp.Start(t.Context(), componenttest.NewNopHost())) - - startCh := make(chan struct{}) - var writersWG sync.WaitGroup - writersWG.Add(tt.numProducers) - for i := 0; i < tt.numProducers; i++ { - go func() { - defer writersWG.Done() - <-startCh - for j := 0; j < tt.writesPerProducer; j++ { - assert.NoError(t, exp.ConsumeMetrics(t.Context(), tt.md)) - } - }() - } - - // Release all senders. - close(startCh) - // Wait for all senders to finish. - writersWG.Wait() - - assert.NoError(t, exp.Shutdown(t.Context())) - cs.shutdownAndVerify(t) - }) - } -} - -func TestNewConnectionPool(t *testing.T) { - assert.IsType(t, &nopConnPool{}, newConnPool(confignet.TCPAddrConfig{Endpoint: defaultEndpoint}, 10*time.Second, 0)) - assert.IsType(t, &connPoolWithIdle{}, newConnPool(confignet.TCPAddrConfig{Endpoint: defaultEndpoint}, 10*time.Second, 10)) -} - -func TestNopConnPool(t *testing.T) { - addr := testutil.GetAvailableLocalAddress(t) - cs := newCarbonServer(t, addr, "") - // Each metric point will generate one Carbon line, set up the wait - // for all of them. - cs.start(t, 2) - - cp := &nopConnPool{ - timeout: 1 * time.Second, - tcpConfig: confignet.TCPAddrConfig{Endpoint: addr}, - } - - conn, err := cp.get() - require.NoError(t, err) - _, err = conn.Write([]byte(metricDataToPlaintext(generateSmallBatch()))) - assert.NoError(t, err) - cp.put(conn) - - // Get a new connection and confirm is not the same. 
- conn2, err2 := cp.get() - require.NoError(t, err2) - assert.NotSame(t, conn, conn2) - _, err = conn2.Write([]byte(metricDataToPlaintext(generateSmallBatch()))) - assert.NoError(t, err) - cp.put(conn2) - - require.NoError(t, cp.close()) - cs.shutdownAndVerify(t) -} - -func TestConnPoolWithIdle(t *testing.T) { - addr := testutil.GetAvailableLocalAddress(t) - cs := newCarbonServer(t, addr, "") - // Each metric point will generate one Carbon line, set up the wait - // for all of them. - cs.start(t, 2) - - cp := &connPoolWithIdle{ - timeout: 1 * time.Second, - tcpConfig: confignet.TCPAddrConfig{Endpoint: addr}, - maxIdleConns: 4, - } - - conn, err := cp.get() - require.NoError(t, err) - _, err = conn.Write([]byte(metricDataToPlaintext(generateSmallBatch()))) - assert.NoError(t, err) - cp.put(conn) - - // Get a new connection and confirm it is the same as the first one. - conn2, err2 := cp.get() - require.NoError(t, err2) - assert.Same(t, conn, conn2) - _, err = conn2.Write([]byte(metricDataToPlaintext(generateSmallBatch()))) - assert.NoError(t, err) - cp.put(conn2) - - require.NoError(t, cp.close()) - cs.shutdownAndVerify(t) -} - -func TestConnPoolWithIdleMaxConnections(t *testing.T) { - addr := testutil.GetAvailableLocalAddress(t) - cs := newCarbonServer(t, addr, "") - const maxIdleConns = 4 - // Each metric point will generate one Carbon line, set up the wait - // for all of them. 
- cs.start(t, maxIdleConns+1) - - cp := &connPoolWithIdle{ - timeout: 1 * time.Second, - tcpConfig: confignet.TCPAddrConfig{Endpoint: addr}, - maxIdleConns: maxIdleConns, - } - - // Create connections and - var conns []net.Conn - for i := range maxIdleConns { - conn, err := cp.get() - require.NoError(t, err) - conns = append(conns, conn) - if i != 0 { - assert.NotSame(t, conn, conns[i-1]) - } - } - for _, conn := range conns { - cp.put(conn) - } - - for i := range maxIdleConns + 1 { - conn, err := cp.get() - require.NoError(t, err) - _, err = conn.Write([]byte(metricDataToPlaintext(generateSmallBatch()))) - assert.NoError(t, err) - if i != maxIdleConns { - assert.Same(t, conn, conns[maxIdleConns-i-1]) - } else { - // this should be a new connection - for _, cachedConn := range conns { - assert.NotSame(t, conn, cachedConn) - } - cp.put(conn) - } - } - for _, conn := range conns { - cp.put(conn) - } - require.NoError(t, cp.close()) - cs.shutdownAndVerify(t) -} - -func generateSmallBatch() pmetric.Metrics { - return generateMetricsBatch(1) -} - -func generateLargeBatch() pmetric.Metrics { - return generateMetricsBatch(1024) -} - -func generateMetricsBatch(size int) pmetric.Metrics { - ts := time.Now() - metrics := pmetric.NewMetrics() - rm := metrics.ResourceMetrics().AppendEmpty() - rm.Resource().Attributes().PutStr(string(conventions.ServiceNameKey), "carbon") - ms := rm.ScopeMetrics().AppendEmpty().Metrics() - - for i := range size { - m := ms.AppendEmpty() - m.SetName("test_" + strconv.Itoa(i)) - dp := m.SetEmptyGauge().DataPoints().AppendEmpty() - dp.Attributes().PutStr("key_0", "value_0") - dp.Attributes().PutStr("key_1", "value_1") - dp.Attributes().PutStr("key_2", "value_2") - dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) - dp.SetIntValue(int64(i)) - } - - return metrics -} - -type carbonServer struct { - ln *net.TCPListener - doneServer *atomic.Bool - wg sync.WaitGroup - expectedContainsValue string -} - -func newCarbonServer(t *testing.T, addr, 
expectedContainsValue string) *carbonServer { - laddr, err := net.ResolveTCPAddr("tcp", addr) - require.NoError(t, err) - ln, err := net.ListenTCP("tcp", laddr) - require.NoError(t, err) - return &carbonServer{ - ln: ln, - doneServer: &atomic.Bool{}, - expectedContainsValue: expectedContainsValue, - } -} - -func (cs *carbonServer) start(t *testing.T, numExpectedReq int) { - cs.wg.Add(numExpectedReq) - go func() { - for { - conn, err := cs.ln.Accept() - if cs.doneServer.Load() { - // Close is expected to cause error. - return - } - assert.NoError(t, err) - go func(conn net.Conn) { - defer func() { - assert.NoError(t, conn.Close()) - }() - - reader := bufio.NewReader(conn) - for { - buf, err := reader.ReadBytes(byte('\n')) - if errors.Is(err, io.EOF) { - return - } - assert.NoError(t, err) - - if cs.expectedContainsValue != "" { - assert.Contains(t, string(buf), cs.expectedContainsValue) - } - - cs.wg.Done() - } - }(conn) - } - }() - <-time.After(100 * time.Millisecond) -} - -func (cs *carbonServer) shutdownAndVerify(t *testing.T) { - cs.wg.Wait() - cs.doneServer.Store(true) - require.NoError(t, cs.ln.Close()) -} diff --git a/exporter/carbonexporter/factory.go b/exporter/carbonexporter/factory.go deleted file mode 100644 index 5622d86274829..0000000000000 --- a/exporter/carbonexporter/factory.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package carbonexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter" - -import ( - "context" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/config/confignet" - "go.opentelemetry.io/collector/config/configretry" - "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/exporterhelper" - - "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter/internal/metadata" -) - -// Defaults for not specified configuration settings. 
-const ( - defaultEndpoint = "localhost:2003" -) - -// NewFactory creates a factory for Carbon exporter. -func NewFactory() exporter.Factory { - return exporter.NewFactory( - metadata.Type, - createDefaultConfig, - exporter.WithMetrics(createMetricsExporter, metadata.MetricsStability)) -} - -func createDefaultConfig() component.Config { - return &Config{ - TCPAddrConfig: confignet.TCPAddrConfig{ - Endpoint: defaultEndpoint, - }, - MaxIdleConns: 100, - TimeoutSettings: exporterhelper.NewDefaultTimeoutConfig(), - QueueConfig: exporterhelper.NewDefaultQueueConfig(), - RetryConfig: configretry.NewDefaultBackOffConfig(), - } -} - -func createMetricsExporter( - ctx context.Context, - params exporter.Settings, - config component.Config, -) (exporter.Metrics, error) { - exp, err := newCarbonExporter(ctx, config.(*Config), params) - if err != nil { - return nil, err - } - - return exp, nil -} diff --git a/exporter/carbonexporter/factory_test.go b/exporter/carbonexporter/factory_test.go deleted file mode 100644 index 20ffc1ff45de3..0000000000000 --- a/exporter/carbonexporter/factory_test.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package carbonexporter - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/exporter/exportertest" - - "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter/internal/metadata" -) - -func TestCreateDefaultConfig(t *testing.T) { - cfg := createDefaultConfig() - assert.NotNil(t, cfg, "failed to create default config") - assert.NoError(t, componenttest.CheckConfigStruct(cfg)) -} - -func TestCreateMetrics(t *testing.T) { - cfg := createDefaultConfig() - _, err := createMetricsExporter(t.Context(), exportertest.NewNopSettings(metadata.Type), cfg) - assert.NoError(t, err) -} - -func TestCreateInstanceViaFactory(t 
*testing.T) { - factory := NewFactory() - - cfg := factory.CreateDefaultConfig() - exp, err := factory.CreateMetrics( - t.Context(), - exportertest.NewNopSettings(metadata.Type), - cfg) - assert.NoError(t, err) - assert.NotNil(t, exp) - - // Set values that don't have a valid default. - // expCfg := cfg.(*Config) - - exp, err = factory.CreateMetrics( - t.Context(), - exportertest.NewNopSettings(metadata.Type), - cfg) - assert.NoError(t, err) - require.NotNil(t, exp) - - assert.NoError(t, exp.Shutdown(t.Context())) -} diff --git a/exporter/carbonexporter/generated_component_test.go b/exporter/carbonexporter/generated_component_test.go deleted file mode 100644 index 6c08c79ce3580..0000000000000 --- a/exporter/carbonexporter/generated_component_test.go +++ /dev/null @@ -1,152 +0,0 @@ -// Code generated by mdatagen. DO NOT EDIT. - -package carbonexporter - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/confmap/confmaptest" - "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/exportertest" - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/pdata/plog" - "go.opentelemetry.io/collector/pdata/pmetric" - "go.opentelemetry.io/collector/pdata/ptrace" -) - -var typ = component.MustNewType("carbon") - -func TestComponentFactoryType(t *testing.T) { - require.Equal(t, typ, NewFactory().Type()) -} - -func TestComponentConfigStruct(t *testing.T) { - require.NoError(t, componenttest.CheckConfigStruct(NewFactory().CreateDefaultConfig())) -} - -func TestComponentLifecycle(t *testing.T) { - factory := NewFactory() - - tests := []struct { - createFn func(ctx context.Context, set exporter.Settings, cfg component.Config) (component.Component, error) - name string - }{ - - { - name: "metrics", - createFn: func(ctx context.Context, set exporter.Settings, cfg 
component.Config) (component.Component, error) { - return factory.CreateMetrics(ctx, set, cfg) - }, - }, - } - - cm, err := confmaptest.LoadConf("metadata.yaml") - require.NoError(t, err) - cfg := factory.CreateDefaultConfig() - sub, err := cm.Sub("tests::config") - require.NoError(t, err) - require.NoError(t, sub.Unmarshal(&cfg)) - - for _, tt := range tests { - t.Run(tt.name+"-shutdown", func(t *testing.T) { - c, err := tt.createFn(context.Background(), exportertest.NewNopSettings(typ), cfg) - require.NoError(t, err) - err = c.Shutdown(context.Background()) - require.NoError(t, err) - }) - t.Run(tt.name+"-lifecycle", func(t *testing.T) { - c, err := tt.createFn(context.Background(), exportertest.NewNopSettings(typ), cfg) - require.NoError(t, err) - host := newMdatagenNopHost() - err = c.Start(context.Background(), host) - require.NoError(t, err) - require.NotPanics(t, func() { - switch tt.name { - case "logs": - e, ok := c.(exporter.Logs) - require.True(t, ok) - logs := generateLifecycleTestLogs() - if !e.Capabilities().MutatesData { - logs.MarkReadOnly() - } - err = e.ConsumeLogs(context.Background(), logs) - case "metrics": - e, ok := c.(exporter.Metrics) - require.True(t, ok) - metrics := generateLifecycleTestMetrics() - if !e.Capabilities().MutatesData { - metrics.MarkReadOnly() - } - err = e.ConsumeMetrics(context.Background(), metrics) - case "traces": - e, ok := c.(exporter.Traces) - require.True(t, ok) - traces := generateLifecycleTestTraces() - if !e.Capabilities().MutatesData { - traces.MarkReadOnly() - } - err = e.ConsumeTraces(context.Background(), traces) - } - }) - - err = c.Shutdown(context.Background()) - require.NoError(t, err) - }) - } -} - -func generateLifecycleTestLogs() plog.Logs { - logs := plog.NewLogs() - rl := logs.ResourceLogs().AppendEmpty() - rl.Resource().Attributes().PutStr("resource", "R1") - l := rl.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() - l.Body().SetStr("test log message") - 
l.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) - return logs -} - -func generateLifecycleTestMetrics() pmetric.Metrics { - metrics := pmetric.NewMetrics() - rm := metrics.ResourceMetrics().AppendEmpty() - rm.Resource().Attributes().PutStr("resource", "R1") - m := rm.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() - m.SetName("test_metric") - dp := m.SetEmptyGauge().DataPoints().AppendEmpty() - dp.Attributes().PutStr("test_attr", "value_1") - dp.SetIntValue(123) - dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) - return metrics -} - -func generateLifecycleTestTraces() ptrace.Traces { - traces := ptrace.NewTraces() - rs := traces.ResourceSpans().AppendEmpty() - rs.Resource().Attributes().PutStr("resource", "R1") - span := rs.ScopeSpans().AppendEmpty().Spans().AppendEmpty() - span.Attributes().PutStr("test_attr", "value_1") - span.SetName("test_span") - span.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now().Add(-1 * time.Second))) - span.SetEndTimestamp(pcommon.NewTimestampFromTime(time.Now())) - return traces -} - -var _ component.Host = (*mdatagenNopHost)(nil) - -type mdatagenNopHost struct{} - -func newMdatagenNopHost() component.Host { - return &mdatagenNopHost{} -} - -func (mnh *mdatagenNopHost) GetExtensions() map[component.ID]component.Component { - return nil -} - -func (mnh *mdatagenNopHost) GetFactory(_ component.Kind, _ component.Type) component.Factory { - return nil -} diff --git a/exporter/carbonexporter/generated_package_test.go b/exporter/carbonexporter/generated_package_test.go deleted file mode 100644 index ad22b23641036..0000000000000 --- a/exporter/carbonexporter/generated_package_test.go +++ /dev/null @@ -1,13 +0,0 @@ -// Code generated by mdatagen. DO NOT EDIT. 
- -package carbonexporter - -import ( - "testing" - - "go.uber.org/goleak" -) - -func TestMain(m *testing.M) { - goleak.VerifyTestMain(m) -} diff --git a/exporter/carbonexporter/go.mod b/exporter/carbonexporter/go.mod deleted file mode 100644 index 69788eedf0150..0000000000000 --- a/exporter/carbonexporter/go.mod +++ /dev/null @@ -1,88 +0,0 @@ -module github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter - -go 1.24.0 - -require ( - github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.140.1 - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.140.1 - github.com/stretchr/testify v1.11.1 - go.opentelemetry.io/collector/component v1.46.1-0.20251120204106-2e9c82787618 - go.opentelemetry.io/collector/component/componenttest v0.140.1-0.20251120204106-2e9c82787618 - go.opentelemetry.io/collector/config/confignet v1.46.1-0.20251120204106-2e9c82787618 - go.opentelemetry.io/collector/config/configretry v1.46.1-0.20251120204106-2e9c82787618 - go.opentelemetry.io/collector/confmap v1.46.1-0.20251120204106-2e9c82787618 - go.opentelemetry.io/collector/confmap/xconfmap v0.140.1-0.20251120204106-2e9c82787618 - go.opentelemetry.io/collector/exporter v1.46.1-0.20251120204106-2e9c82787618 - go.opentelemetry.io/collector/exporter/exporterhelper v0.140.1-0.20251120204106-2e9c82787618 - go.opentelemetry.io/collector/exporter/exportertest v0.140.1-0.20251120204106-2e9c82787618 - go.opentelemetry.io/collector/pdata v1.46.1-0.20251120204106-2e9c82787618 - go.opentelemetry.io/otel v1.38.0 - go.uber.org/goleak v1.3.0 - go.uber.org/multierr v1.11.0 -) - -require ( - github.com/cenkalti/backoff/v5 v5.0.3 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/go-logr/logr v1.4.3 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-viper/mapstructure/v2 v2.4.0 // indirect - github.com/gobwas/glob v0.2.3 // indirect - github.com/google/uuid v1.6.0 // indirect - 
github.com/hashicorp/go-version v1.7.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/knadh/koanf/maps v0.1.2 // indirect - github.com/knadh/koanf/providers/confmap v1.0.0 // indirect - github.com/knadh/koanf/v2 v2.3.0 // indirect - github.com/mitchellh/copystructure v1.2.0 // indirect - github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/collector/client v1.46.1-0.20251120204106-2e9c82787618 // indirect - go.opentelemetry.io/collector/config/configoptional v1.46.1-0.20251120204106-2e9c82787618 // indirect - go.opentelemetry.io/collector/consumer v1.46.1-0.20251120204106-2e9c82787618 // indirect - go.opentelemetry.io/collector/consumer/consumererror v0.140.1-0.20251120204106-2e9c82787618 // indirect - go.opentelemetry.io/collector/consumer/consumertest v0.140.1-0.20251120204106-2e9c82787618 // indirect - go.opentelemetry.io/collector/consumer/xconsumer v0.140.1-0.20251120204106-2e9c82787618 // indirect - go.opentelemetry.io/collector/exporter/xexporter v0.140.1-0.20251120204106-2e9c82787618 // indirect - go.opentelemetry.io/collector/extension v1.46.1-0.20251120204106-2e9c82787618 // indirect - go.opentelemetry.io/collector/extension/xextension v0.140.1-0.20251120204106-2e9c82787618 // indirect - go.opentelemetry.io/collector/featuregate v1.46.1-0.20251120204106-2e9c82787618 // indirect - go.opentelemetry.io/collector/pdata/pprofile v0.140.1-0.20251120204106-2e9c82787618 // indirect - go.opentelemetry.io/collector/pdata/xpdata v0.140.1-0.20251120204106-2e9c82787618 // indirect - go.opentelemetry.io/collector/pipeline v1.46.1-0.20251120204106-2e9c82787618 // indirect - go.opentelemetry.io/collector/receiver v1.46.1-0.20251120204106-2e9c82787618 // indirect - 
go.opentelemetry.io/collector/receiver/receivertest v0.140.1-0.20251120204106-2e9c82787618 // indirect - go.opentelemetry.io/collector/receiver/xreceiver v0.140.1-0.20251120204106-2e9c82787618 // indirect - go.opentelemetry.io/otel/metric v1.38.0 // indirect - go.opentelemetry.io/otel/sdk v1.38.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect - go.opentelemetry.io/otel/trace v1.38.0 // indirect - go.uber.org/zap v1.27.1 // indirect - go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/sys v0.37.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 // indirect - google.golang.org/grpc v1.77.0 // indirect - google.golang.org/protobuf v1.36.10 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) - -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../internal/common - -retract ( - v0.76.2 - v0.76.1 - v0.65.0 -) - -replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry => ../../pkg/resourcetotelemetry - -replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal - -replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil => ../../pkg/pdatautil - -replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest => ../../pkg/pdatatest - -replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden => ../../pkg/golden diff --git a/exporter/carbonexporter/go.sum b/exporter/carbonexporter/go.sum deleted file mode 100644 index 571b3288bc723..0000000000000 --- a/exporter/carbonexporter/go.sum +++ /dev/null @@ -1,152 +0,0 @@ -github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= -github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= -github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= -github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= -github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= -github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= -github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpbo= -github.com/knadh/koanf/maps v0.1.2/go.mod 
h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= -github.com/knadh/koanf/providers/confmap v1.0.0 h1:mHKLJTE7iXEys6deO5p6olAiZdG5zwp8Aebir+/EaRE= -github.com/knadh/koanf/providers/confmap v1.0.0/go.mod h1:txHYHiI2hAtF0/0sCmcuol4IDcuQbKTybiB1nOcUo1A= -github.com/knadh/koanf/v2 v2.3.0 h1:Qg076dDRFHvqnKG97ZEsi9TAg2/nFTa9hCdcSa1lvlM= -github.com/knadh/koanf/v2 v2.3.0/go.mod h1:gRb40VRAbd4iJMYYD5IxZ6hfuopFcXBpc9bbQpZwo28= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= -github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= -github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= -github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
-github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= -github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= -github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= -go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/collector/client v1.46.1-0.20251120204106-2e9c82787618 h1:xIU4szXwM+ausU1ZATfAGOiIAJLLDwnf6aSiL9X5LFs= -go.opentelemetry.io/collector/client v1.46.1-0.20251120204106-2e9c82787618/go.mod h1:/Y2bm0RdD8LKIEQOX5YqqjglKNb8AYCdDuKb04/fURw= -go.opentelemetry.io/collector/component v1.46.1-0.20251120204106-2e9c82787618 h1:xIx6z5BadxNXT/1ZOeQNS8fcGkBBoS9gAPXlWOnfsJY= -go.opentelemetry.io/collector/component v1.46.1-0.20251120204106-2e9c82787618/go.mod h1:Zp+JaUgGrPvt4JNzJU1MD7KcZhauab9W0pCykgGPSN0= -go.opentelemetry.io/collector/component/componenttest v0.140.1-0.20251120204106-2e9c82787618 h1:u5wbaLO4iOv6ym0sI2JmwKM0CidLm/wRo1wuKAdBI08= -go.opentelemetry.io/collector/component/componenttest v0.140.1-0.20251120204106-2e9c82787618/go.mod h1:6u5eMJozhiF1p3sllc2p/07iqZMqkpHPvF/HZ0sRl9o= -go.opentelemetry.io/collector/config/confignet v1.46.1-0.20251120204106-2e9c82787618 h1:qByNo90PX0uSJhEDK36y4dVgtohhVxVNCKrRJwazHzk= -go.opentelemetry.io/collector/config/confignet v1.46.1-0.20251120204106-2e9c82787618/go.mod h1:4jJWdoe1MmpqxMzxrIILcS5FK2JPocXYZGUvv5ZQVKE= -go.opentelemetry.io/collector/config/configoptional v1.46.1-0.20251120204106-2e9c82787618 h1:qCZBVjIIdgOodNnIPY4vjElWg52HLRTcBvWqNU5xHZw= -go.opentelemetry.io/collector/config/configoptional 
v1.46.1-0.20251120204106-2e9c82787618/go.mod h1:XgGvHiFtro2MpPWbo4ExQ7CLnSBqzWAANfBIPv4QSVg= -go.opentelemetry.io/collector/config/configretry v1.46.1-0.20251120204106-2e9c82787618 h1:zM+7A+LEi5TZa1fu0xVLC+ir7/axuDqdYRkt+fiHgHk= -go.opentelemetry.io/collector/config/configretry v1.46.1-0.20251120204106-2e9c82787618/go.mod h1:ZSTYqAJCq4qf+/4DGoIxCElDIl5yHt8XxEbcnpWBbMM= -go.opentelemetry.io/collector/confmap v1.46.1-0.20251120204106-2e9c82787618 h1:jlgrDds/sD1WZ7uLnvRrDk32OvJh/BP1dvbhA7kdJrg= -go.opentelemetry.io/collector/confmap v1.46.1-0.20251120204106-2e9c82787618/go.mod h1:uqrwOuf+1PeZ9Zo/IDV9hJlvFy2eRKYUajkM1Lsmyto= -go.opentelemetry.io/collector/confmap/xconfmap v0.140.1-0.20251120204106-2e9c82787618 h1:FFT5OWG1bHYTCFUOoQozVsgN1723kBWZQOyNBgEsB4E= -go.opentelemetry.io/collector/confmap/xconfmap v0.140.1-0.20251120204106-2e9c82787618/go.mod h1:KInqGVGClR7dDDJLkHsl3riO03et7TaBrGKVD5pD4i0= -go.opentelemetry.io/collector/consumer v1.46.1-0.20251120204106-2e9c82787618 h1:oVj1/83ioZC1nXbL6dubVg8Dvz9S+++T3CXur3a9NpU= -go.opentelemetry.io/collector/consumer v1.46.1-0.20251120204106-2e9c82787618/go.mod h1:3hjV46vdz8zExuTKlxRge3VdeVUr0PJETqIMewKThNc= -go.opentelemetry.io/collector/consumer/consumererror v0.140.1-0.20251120204106-2e9c82787618 h1:ee9840Er+vNXKO+ULhG9VYOorESJDxfaerlEDvdlh8U= -go.opentelemetry.io/collector/consumer/consumererror v0.140.1-0.20251120204106-2e9c82787618/go.mod h1:iBRUV6Pvm00HE5EI2+t2wNlP6MoGAAK9xMKLPeA+PZ4= -go.opentelemetry.io/collector/consumer/consumertest v0.140.1-0.20251120204106-2e9c82787618 h1:bondxHiwLGmSrJ3WiUaUwx5bvaUwP1b6/AvMUPqDoV0= -go.opentelemetry.io/collector/consumer/consumertest v0.140.1-0.20251120204106-2e9c82787618/go.mod h1:LvDaKM5A7hUg7LWZBqk69sE0q5GrdM8BmLqX6kCP3WQ= -go.opentelemetry.io/collector/consumer/xconsumer v0.140.1-0.20251120204106-2e9c82787618 h1:F5We4BryZ1N3/pDdEqefIIZQzKPT7GaoOkTTGpfkd5w= -go.opentelemetry.io/collector/consumer/xconsumer v0.140.1-0.20251120204106-2e9c82787618/go.mod 
h1:CtwSgAXVisCEJ+ElKeDa0yDo/Oie7l1vWAx1elFyWZc= -go.opentelemetry.io/collector/exporter v1.46.1-0.20251120204106-2e9c82787618 h1:fgbGpYDCRFHXe9NDI8YQY+Dif3GiPmQKWXkO/6gakos= -go.opentelemetry.io/collector/exporter v1.46.1-0.20251120204106-2e9c82787618/go.mod h1:kpLf41bsppVa3IOCtavGG724DRK6AGT++PMnejp+FjA= -go.opentelemetry.io/collector/exporter/exporterhelper v0.140.1-0.20251120204106-2e9c82787618 h1:OjNN8y5sctrdoOs8R82zAMtKNWGMQm8eQbJmg3aP1UU= -go.opentelemetry.io/collector/exporter/exporterhelper v0.140.1-0.20251120204106-2e9c82787618/go.mod h1:qmOnlpZ43k8y5xjeThMm5JmBYgwfO0Fyn7jys3azLxQ= -go.opentelemetry.io/collector/exporter/exportertest v0.140.1-0.20251120204106-2e9c82787618 h1:+S2kVxZdOgVNJOExhBoSA4MaDrNh7vPSXRv/SKO+VjA= -go.opentelemetry.io/collector/exporter/exportertest v0.140.1-0.20251120204106-2e9c82787618/go.mod h1:ut6WpmCj7+10NqBFgZZfPF1gClMINEc8XKrnnAcBT84= -go.opentelemetry.io/collector/exporter/xexporter v0.140.1-0.20251120204106-2e9c82787618 h1:o6TAfHNqM6NXFzOpAyvGl8s86/9uVLUKeuiw7NP3ZA0= -go.opentelemetry.io/collector/exporter/xexporter v0.140.1-0.20251120204106-2e9c82787618/go.mod h1:KIn0RaW66ifb6tXKz5XU+icFBVpn2QDH5QqaKdZDEJA= -go.opentelemetry.io/collector/extension v1.46.1-0.20251120204106-2e9c82787618 h1:dEOdTe91ephosris+SqcCNMigbTAVKVeTH5y/RKFWok= -go.opentelemetry.io/collector/extension v1.46.1-0.20251120204106-2e9c82787618/go.mod h1:/NGiZQFF7hTyfRULTgtYw27cIW8i0hWUTp12lDftZS0= -go.opentelemetry.io/collector/extension/extensiontest v0.140.0 h1:a4ggfsp73GA9oGCxBtmQJE827SRq36E+YQIZ0MGIKVQ= -go.opentelemetry.io/collector/extension/extensiontest v0.140.0/go.mod h1:TKR1zB0CtJ3tedNyUUaeCw5O2qPlFNjHKmh2ri53uTU= -go.opentelemetry.io/collector/extension/xextension v0.140.1-0.20251120204106-2e9c82787618 h1:MSyc7Y/pAl+HYhGNe+xEnGQoCNkSjZLEhbtY5wjxUng= -go.opentelemetry.io/collector/extension/xextension v0.140.1-0.20251120204106-2e9c82787618/go.mod h1:avzOyx3eIOr/AYcfsaBF9iMZVJnnp/UsdtJUNemYgcs= -go.opentelemetry.io/collector/featuregate 
v1.46.1-0.20251120204106-2e9c82787618 h1:kVcNLQHeTacufSIPasBJG7WbR02qxEBUWfrMAu/Un40= -go.opentelemetry.io/collector/featuregate v1.46.1-0.20251120204106-2e9c82787618/go.mod h1:d0tiRzVYrytB6LkcYgz2ESFTv7OktRPQe0QEQcPt1L4= -go.opentelemetry.io/collector/pdata v1.46.1-0.20251120204106-2e9c82787618 h1:w4Sd8D+T6wdekkBJlfjAsa7wpXDUmb3wQicikJ8vI9M= -go.opentelemetry.io/collector/pdata v1.46.1-0.20251120204106-2e9c82787618/go.mod h1:AqZXTFkj01IxuiHZ1/I7UcGqaljvF5xiUXNYGxRqVp8= -go.opentelemetry.io/collector/pdata/pprofile v0.140.1-0.20251120204106-2e9c82787618 h1:EPM+f1DSlHtcTT32N2tfIwXY58N5lOChdYNEEgBk5uA= -go.opentelemetry.io/collector/pdata/pprofile v0.140.1-0.20251120204106-2e9c82787618/go.mod h1:01EwjIBpIcmJva7IoXPmHPmACGzsGxFi9xhZhY7W4q8= -go.opentelemetry.io/collector/pdata/testdata v0.140.0 h1:jMhHRS8HbiYwXeElnuTNT+17QGUF+5A5MPgdSOjpJrw= -go.opentelemetry.io/collector/pdata/testdata v0.140.0/go.mod h1:4BZo10Ua0sbxrqMOPzVU4J/EJdE3js472lskyPW4re8= -go.opentelemetry.io/collector/pdata/xpdata v0.140.1-0.20251120204106-2e9c82787618 h1:ucCtCZIzRute6Mx7CE5RqgHNNT950uRdow5HtfbtJCg= -go.opentelemetry.io/collector/pdata/xpdata v0.140.1-0.20251120204106-2e9c82787618/go.mod h1:yKJQ+zPe6c9teCbRwJ+1kK3Fw+pgtKgDXPLCKleZLJI= -go.opentelemetry.io/collector/pipeline v1.46.1-0.20251120204106-2e9c82787618 h1:shpb1oV7YQgGaPP59WC/A72B+wdki9DfdJ315O2UtnY= -go.opentelemetry.io/collector/pipeline v1.46.1-0.20251120204106-2e9c82787618/go.mod h1:xUrAqiebzYbrgxyoXSkk6/Y3oi5Sy3im2iCA51LwUAI= -go.opentelemetry.io/collector/receiver v1.46.1-0.20251120204106-2e9c82787618 h1:lEhxCaW1jtBwV3lTPErL2YhfzH8J9FOn2zLtU6aqJt0= -go.opentelemetry.io/collector/receiver v1.46.1-0.20251120204106-2e9c82787618/go.mod h1:6AXBeYTN2iK2f8yNWPI7gz/3xpDLgF4L5DInhYeWBhE= -go.opentelemetry.io/collector/receiver/receivertest v0.140.1-0.20251120204106-2e9c82787618 h1:gKeCA1TJYgRUftF3mV2e3Cz4sd727vr2ZJBDODMiZr8= -go.opentelemetry.io/collector/receiver/receivertest v0.140.1-0.20251120204106-2e9c82787618/go.mod 
h1:AhZsaTZ8CBeCd0m4WYycciOYltjw/E8AH6b7kKZIeTA= -go.opentelemetry.io/collector/receiver/xreceiver v0.140.1-0.20251120204106-2e9c82787618 h1:HIDI1rbOfXhuO6LUVZ9C7//rF6GloYcylIoC44102Cw= -go.opentelemetry.io/collector/receiver/xreceiver v0.140.1-0.20251120204106-2e9c82787618/go.mod h1:he6Lbg4S8T8dpwBTGwvRiR6SRMLB6iv0ZTWsOqGZ4iM= -go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= -go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= -go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= -go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= -go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= -go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= -go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= -go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= -go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= -go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= -go.opentelemetry.io/proto/slim/otlp v1.9.0 h1:fPVMv8tP3TrsqlkH1HWYUpbCY9cAIemx184VGkS6vlE= -go.opentelemetry.io/proto/slim/otlp v1.9.0/go.mod h1:xXdeJJ90Gqyll+orzUkY4bOd2HECo5JofeoLpymVqdI= -go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.2.0 h1:o13nadWDNkH/quoDomDUClnQBpdQQ2Qqv0lQBjIXjE8= -go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.2.0/go.mod h1:Gyb6Xe7FTi/6xBHwMmngGoHqL0w29Y4eW8TGFzpefGA= -go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.2.0 h1:EiUYvtwu6PMrMHVjcPfnsG3v+ajPkbUeH+IL93+QYyk= -go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.2.0/go.mod h1:mUUHKFiN2SST3AhJ8XhJxEoeVW12oqfXog0Bo8W3Ec4= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= 
-go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= -go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= -go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= -golang.org/x/net v0.46.1-0.20251013234738-63d1a5100f82 h1:6/3JGEh1C88g7m+qzzTbl3A0FtsLguXieqofVLU/JAo= -golang.org/x/net v0.46.1-0.20251013234738-63d1a5100f82/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= -golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= -golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= -golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 h1:M1rk8KBnUsBDg1oPGHNCxG4vc1f49epmTO7xscSajMk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= -google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= -google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= -google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= -google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod 
h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/exporter/carbonexporter/internal/metadata/generated_status.go b/exporter/carbonexporter/internal/metadata/generated_status.go deleted file mode 100644 index f5e74171783dd..0000000000000 --- a/exporter/carbonexporter/internal/metadata/generated_status.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by mdatagen. DO NOT EDIT. - -package metadata - -import ( - "go.opentelemetry.io/collector/component" -) - -var ( - Type = component.MustNewType("carbon") - ScopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter" -) - -const ( - MetricsStability = component.StabilityLevelUnmaintained -) diff --git a/exporter/carbonexporter/metadata.yaml b/exporter/carbonexporter/metadata.yaml deleted file mode 100644 index 9fe6074aedc32..0000000000000 --- a/exporter/carbonexporter/metadata.yaml +++ /dev/null @@ -1,14 +0,0 @@ -type: carbon - -status: - class: exporter - stability: - unmaintained: [metrics] - distributions: [contrib] - codeowners: - active: [] - emeritus: [aboguszewski-sumo] - seeking_new: true - -tests: - expect_consumer_error: true \ No newline at end of file diff --git a/exporter/carbonexporter/metricdata_to_plaintext.go b/exporter/carbonexporter/metricdata_to_plaintext.go deleted file mode 100644 index b10259bcfe946..0000000000000 --- a/exporter/carbonexporter/metricdata_to_plaintext.go +++ /dev/null @@ -1,338 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package carbonexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter" - -import ( - "bytes" - "strconv" - "strings" - "sync" - - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/pdata/pmetric" -) - -const ( - // sanitizedRune is used to replace 
any invalid char per Carbon format. - sanitizedRune = '_' - - // Tag related constants per Carbon plaintext protocol. - tagPrefix = ";" - tagKeyValueSeparator = "=" - tagValueEmptyPlaceholder = "" - tagLineEmptySpace = " " - tagLineNewLine = "\n" - - // Constants used when converting from distribution metrics to Carbon format. - distributionBucketSuffix = ".bucket" - distributionUpperBoundTagKey = "upper_bound" - distributionUpperBoundTagBeforeValue = tagPrefix + distributionUpperBoundTagKey + tagKeyValueSeparator - - // Constants used when converting from summary metrics to Carbon format. - summaryQuantileSuffix = ".quantile" - summaryQuantileTagKey = "quantile" - summaryQuantileTagBeforeValue = tagPrefix + summaryQuantileTagKey + tagKeyValueSeparator - - // Suffix to be added to original metric name for a Carbon metric representing - // a count metric for either distribution or summary metrics. - countSuffix = ".count" - - // Textual representation for positive infinity valid in Carbon, ie.: - // positive infinity as represented in Python. - infinityCarbonValue = "inf" -) - -var writerPool = sync.Pool{ - New: func() any { - // Start with a buffer of 1KB. - return bytes.NewBuffer(make([]byte, 0, 1024)) - }, -} - -// metricDataToPlaintext converts internal metrics data to the Carbon plaintext -// format as defined in https://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-plaintext-protocol) -// and https://graphite.readthedocs.io/en/latest/tags.html#carbon. See details -// below. -// -// Each metric point becomes a single string with the following format: -// -// " " -// -// The contains the metric name and its tags and has the following, -// format: -// -// [;tag0;...;tagN] -// -// is the name of the metric and terminates either at the first ';' -// or at the end of the path. -// -// is of the form "key=val", where key can contain any char except ";!^=" and -// val can contain any char except ";~". 
-// -// The is the textual representation of the metric value. -// -// The is the Unix time text of when the measurement was made. -// -// The returned values are: -// - a string concatenating all generated "lines" (each single one representing -// a single Carbon metric. -// - number of time series successfully converted to carbon. -// - number of time series that could not be converted to Carbon. -func metricDataToPlaintext(md pmetric.Metrics) string { - if md.DataPointCount() == 0 { - return "" - } - - buf := writerPool.Get().(*bytes.Buffer) - buf.Reset() - defer writerPool.Put(buf) - - for i := 0; i < md.ResourceMetrics().Len(); i++ { - rm := md.ResourceMetrics().At(i) - for j := 0; j < rm.ScopeMetrics().Len(); j++ { - sm := rm.ScopeMetrics().At(j) - for k := 0; k < sm.Metrics().Len(); k++ { - metric := sm.Metrics().At(k) - if metric.Name() == "" { - // TODO: log error info - continue - } - switch metric.Type() { - case pmetric.MetricTypeGauge: - writeNumberDataPoints(buf, metric.Name(), metric.Gauge().DataPoints()) - case pmetric.MetricTypeSum: - writeNumberDataPoints(buf, metric.Name(), metric.Sum().DataPoints()) - case pmetric.MetricTypeHistogram: - formatHistogramDataPoints(buf, metric.Name(), metric.Histogram().DataPoints()) - case pmetric.MetricTypeSummary: - formatSummaryDataPoints(buf, metric.Name(), metric.Summary().DataPoints()) - } - } - } - } - - return buf.String() -} - -func writeNumberDataPoints(buf *bytes.Buffer, metricName string, dps pmetric.NumberDataPointSlice) { - for i := 0; i < dps.Len(); i++ { - dp := dps.At(i) - var valueStr string - switch dp.ValueType() { - case pmetric.NumberDataPointValueTypeEmpty: - continue // skip this data point - otherwise an empty string will be used as the value and the backend will use the timestamp as the metric value - case pmetric.NumberDataPointValueTypeInt: - valueStr = formatInt64(dp.IntValue()) - case pmetric.NumberDataPointValueTypeDouble: - valueStr = formatFloatForValue(dp.DoubleValue()) - } - 
writeLine( - buf, - buildPath(metricName, dp.Attributes()), - valueStr, - formatTimestamp(dp.Timestamp())) - } -} - -// formatHistogramDataPoints transforms a slice of histogram data points into a series -// of Carbon metrics and injects them into the string builder. -// -// Carbon doesn't have direct support to distribution metrics they will be -// translated into a series of Carbon metrics: -// -// 1. The total count will be represented by a metric named ".count". -// -// 2. The total sum will be represented by a metric with the original "". -// -// 3. Each histogram bucket is represented by a metric named ".bucket" -// and will include a dimension "upper_bound" that specifies the maximum value in -// that bucket. This metric specifies the number of events with a value that is -// less than or equal to the upper bound. -func formatHistogramDataPoints( - buf *bytes.Buffer, - metricName string, - dps pmetric.HistogramDataPointSlice, -) { - for i := 0; i < dps.Len(); i++ { - dp := dps.At(i) - - timestampStr := formatTimestamp(dp.Timestamp()) - formatCountAndSum(buf, metricName, dp.Attributes(), dp.Count(), dp.Sum(), timestampStr) - if dp.ExplicitBounds().Len() == 0 { - continue - } - - bounds := dp.ExplicitBounds().AsRaw() - carbonBounds := make([]string, len(bounds)+1) - for i := range bounds { - carbonBounds[i] = formatFloatForLabel(bounds[i]) - } - carbonBounds[len(carbonBounds)-1] = infinityCarbonValue - - bucketPath := buildPath(metricName+distributionBucketSuffix, dp.Attributes()) - for j := 0; j < dp.BucketCounts().Len(); j++ { - writeLine( - buf, - bucketPath+distributionUpperBoundTagBeforeValue+carbonBounds[j], - formatUint64(dp.BucketCounts().At(j)), - timestampStr) - } - } -} - -// formatSummaryDataPoints transforms a slice of summary data points into a series -// of Carbon metrics and injects them into the string builder. -// -// Carbon doesn't have direct support to summary metrics they will be -// translated into a series of Carbon metrics: -// -// 1. 
The total count will be represented by a metric named ".count". -// -// 2. The total sum will be represented by a metric with the original "". -// -// 3. Each quantile is represented by a metric named ".quantile" -// and will include a tag key "quantile" that specifies the quantile value. -func formatSummaryDataPoints( - buf *bytes.Buffer, - metricName string, - dps pmetric.SummaryDataPointSlice, -) { - for i := 0; i < dps.Len(); i++ { - dp := dps.At(i) - - timestampStr := formatTimestamp(dp.Timestamp()) - formatCountAndSum(buf, metricName, dp.Attributes(), dp.Count(), dp.Sum(), timestampStr) - - if dp.QuantileValues().Len() == 0 { - continue - } - - quantilePath := buildPath(metricName+summaryQuantileSuffix, dp.Attributes()) - for j := 0; j < dp.QuantileValues().Len(); j++ { - writeLine( - buf, - quantilePath+summaryQuantileTagBeforeValue+formatFloatForLabel(dp.QuantileValues().At(j).Quantile()*100), - formatFloatForValue(dp.QuantileValues().At(j).Value()), - timestampStr) - } - } -} - -// Carbon doesn't have direct support to distribution or summary metrics in both -// cases it needs to create a "count" and a "sum" metric. This function creates -// both, as follows: -// -// 1. The total count will be represented by a metric named ".count". -// -// 2. The total sum will be represented by a metruc with the original "". -func formatCountAndSum( - buf *bytes.Buffer, - metricName string, - attributes pcommon.Map, - count uint64, - sum float64, - timestampStr string, -) { - // Write count and sum metrics. - writeLine( - buf, - buildPath(metricName+countSuffix, attributes), - formatUint64(count), - timestampStr) - - writeLine( - buf, - buildPath(metricName, attributes), - formatFloatForValue(sum), - timestampStr) -} - -// buildPath is used to build the per description above. 
-func buildPath(name string, attributes pcommon.Map) string { - if attributes.Len() == 0 { - return name - } - - buf := writerPool.Get().(*bytes.Buffer) - buf.Reset() - defer writerPool.Put(buf) - - buf.WriteString(name) - for k, v := range attributes.All() { - value := v.AsString() - if value == "" { - value = tagValueEmptyPlaceholder - } - buf.WriteString(tagPrefix) - buf.WriteString(sanitizeTagKey(k)) - buf.WriteString(tagKeyValueSeparator) - buf.WriteString(value) - } - - return buf.String() -} - -// writeLine builds a single Carbon metric textual line, ie.: it already adds -// a new-line character at the end of the string. -func writeLine(buf *bytes.Buffer, path, value, timestamp string) { - buf.WriteString(path) - buf.WriteString(tagLineEmptySpace) - buf.WriteString(value) - buf.WriteString(tagLineEmptySpace) - buf.WriteString(timestamp) - buf.WriteString(tagLineNewLine) -} - -// sanitizeTagKey removes any invalid character from the tag key, the invalid -// characters are ";!^=". -func sanitizeTagKey(key string) string { - mapRune := func(r rune) rune { - switch r { - case ';', '!', '^', '=': - return sanitizedRune - default: - return r - } - } - - return strings.Map(mapRune, key) -} - -// sanitizeTagValue removes any invalid character from the tag value, the invalid -// characters are ";~". -func sanitizeTagValue(value string) string { - mapRune := func(r rune) rune { - switch r { - case ';', '~': - return sanitizedRune - default: - return r - } - } - - return strings.Map(mapRune, value) -} - -// Formats a float64 per Prometheus label value. This is an attempt to keep other -// the label values with different formats of metrics. -func formatFloatForLabel(f float64) string { - return strconv.FormatFloat(f, 'g', -1, 64) -} - -// Formats a float64 per Carbon plaintext format. 
-func formatFloatForValue(f float64) string { - return strconv.FormatFloat(f, 'f', -1, 64) -} - -func formatUint64(i uint64) string { - return strconv.FormatUint(i, 10) -} - -func formatInt64(i int64) string { - return strconv.FormatInt(i, 10) -} - -func formatTimestamp(timestamp pcommon.Timestamp) string { - return formatUint64(uint64(timestamp) / 1e9) -} diff --git a/exporter/carbonexporter/metricdata_to_plaintext_test.go b/exporter/carbonexporter/metricdata_to_plaintext_test.go deleted file mode 100644 index 8cd23605a9b85..0000000000000 --- a/exporter/carbonexporter/metricdata_to_plaintext_test.go +++ /dev/null @@ -1,333 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package carbonexporter - -import ( - "strconv" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/pdata/pmetric" -) - -func TestSanitizeTagKey(t *testing.T) { - tests := []struct { - name string - key string - want string - }{ - { - name: "no_changes", - key: "a valid tag key", - want: "a valid tag key", - }, - { - name: "remove_tag_set", - key: "a" + tagKeyValueSeparator + "c", - want: "a" + string(sanitizedRune) + "c", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := sanitizeTagKey(tt.key) - assert.Equal(t, tt.want, got) - }) - } -} - -func TestSanitizeTagValue(t *testing.T) { - tests := []struct { - name string - value string - want string - }{ - { - name: "no_changes", - value: "a valid tag value", - want: "a valid tag value", - }, - { - name: "replace_tilde", - value: "a~c", - want: "a" + string(sanitizedRune) + "c", - }, - { - name: "replace_semicol", - value: "a;c", - want: "a" + string(sanitizedRune) + "c", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := sanitizeTagValue(tt.value) - assert.Equal(t, tt.want, got) - }) - } -} - -func TestBuildPath(t *testing.T) { - tests := 
[]struct { - name string - attributes pcommon.Map - want string - }{ - { - name: "happy_path", - attributes: func() pcommon.Map { - attr := pcommon.NewMap() - attr.PutStr("key0", "val0") - return attr - }(), - want: "happy_path;key0=val0", - }, - { - name: "empty_value", - attributes: func() pcommon.Map { - attr := pcommon.NewMap() - attr.PutStr("k0", "") - attr.PutStr("k1", "v1") - return attr - }(), - want: "empty_value;k0=" + tagValueEmptyPlaceholder + ";k1=v1", - }, - { - name: "int_value", - attributes: func() pcommon.Map { - attr := pcommon.NewMap() - attr.PutInt("k", 1) - return attr - }(), - want: "int_value;k=1", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := buildPath(tt.name, tt.attributes) - assert.Equal(t, tt.want, got) - }) - } -} - -func TestToPlaintext(t *testing.T) { - unixSecs := int64(1574092046) - expectedUnixSecsStr := strconv.FormatInt(unixSecs, 10) - unixNSecs := int64(11 * time.Millisecond) - tsUnix := time.Unix(unixSecs, unixNSecs) - - doubleVal := 1234.5678 - expectedDobuleValStr := strconv.FormatFloat(doubleVal, 'g', -1, 64) - int64Val := int64(123) - expectedInt64ValStr := "123" - - distributionCount := uint64(16) - distributionSum := float64(34.56) - distributionBounds := []float64{1.5, 2, 4} - distributionCounts := []uint64{4, 2, 3, 7} - - summaryCount := uint64(11) - summarySum := float64(111) - summaryQuantiles := []float64{90, 95, 99, 99.9} - summaryQuantileValues := []float64{100, 6, 4, 1} - tests := []struct { - name string - metricsDataFn func() pmetric.Metrics - wantLines []string - wantExtraLinesCount int - }{ - { - name: "gauge", - metricsDataFn: func() pmetric.Metrics { - md := pmetric.NewMetrics() - ms := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() - ms.AppendEmpty().SetName("gauge_double_no_dims") - dps1 := ms.At(0).SetEmptyGauge().DataPoints() - dps1.AppendEmpty().SetTimestamp(pcommon.NewTimestampFromTime(tsUnix)) - dps1.At(0).SetDoubleValue(doubleVal) - 
ms.AppendEmpty().SetName("gauge_int_no_dims") - dps2 := ms.At(1).SetEmptyGauge().DataPoints() - dps2.AppendEmpty().SetTimestamp(pcommon.NewTimestampFromTime(tsUnix)) - dps2.At(0).SetIntValue(int64Val) - ms.AppendEmpty().SetName("gauge_double_with_dims") - dps3 := ms.At(2).SetEmptyGauge().DataPoints() - dps3.AppendEmpty().SetTimestamp(pcommon.NewTimestampFromTime(tsUnix)) - dps3.At(0).Attributes().PutStr("k0", "v0") - dps3.At(0).Attributes().PutStr("k1", "v1") - dps3.At(0).SetDoubleValue(doubleVal) - ms.AppendEmpty().SetName("gauge_int_with_dims") - dps4 := ms.At(3).SetEmptyGauge().DataPoints() - dps4.AppendEmpty().SetTimestamp(pcommon.NewTimestampFromTime(tsUnix)) - dps4.At(0).Attributes().PutStr("k0", "v0") - dps4.At(0).Attributes().PutStr("k1", "v1") - dps4.At(0).SetIntValue(int64Val) - ms.AppendEmpty().SetName("gauge_no_value") - dps5 := ms.At(4).SetEmptyGauge().DataPoints() - dps5.AppendEmpty().SetTimestamp(pcommon.NewTimestampFromTime(tsUnix)) - return md - }, - wantLines: []string{ - "gauge_double_no_dims " + expectedDobuleValStr + " " + expectedUnixSecsStr, - "gauge_int_no_dims " + expectedInt64ValStr + " " + expectedUnixSecsStr, - "gauge_double_with_dims;k0=v0;k1=v1 " + expectedDobuleValStr + " " + expectedUnixSecsStr, - "gauge_int_with_dims;k0=v0;k1=v1 " + expectedInt64ValStr + " " + expectedUnixSecsStr, - }, - }, - { - name: "cumulative_monotonic_sum", - metricsDataFn: func() pmetric.Metrics { - md := pmetric.NewMetrics() - ms := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() - ms.AppendEmpty().SetName("cumulative_double_no_dims") - ms.At(0).SetEmptySum().SetIsMonotonic(true) - dps1 := ms.At(0).Sum().DataPoints() - dps1.AppendEmpty().SetTimestamp(pcommon.NewTimestampFromTime(tsUnix)) - dps1.At(0).SetDoubleValue(doubleVal) - ms.AppendEmpty().SetName("cumulative_int_no_dims") - ms.At(1).SetEmptySum().SetIsMonotonic(true) - dps2 := ms.At(1).Sum().DataPoints() - dps2.AppendEmpty().SetTimestamp(pcommon.NewTimestampFromTime(tsUnix)) - 
dps2.At(0).SetIntValue(int64Val) - ms.AppendEmpty().SetName("cumulative_double_with_dims") - ms.At(2).SetEmptySum().SetIsMonotonic(true) - dps3 := ms.At(2).Sum().DataPoints() - dps3.AppendEmpty().SetTimestamp(pcommon.NewTimestampFromTime(tsUnix)) - dps3.At(0).Attributes().PutStr("k0", "v0") - dps3.At(0).Attributes().PutStr("k1", "v1") - dps3.At(0).SetDoubleValue(doubleVal) - ms.AppendEmpty().SetName("cumulative_int_with_dims") - ms.At(3).SetEmptySum().SetIsMonotonic(true) - dps4 := ms.At(3).Sum().DataPoints() - dps4.AppendEmpty().SetTimestamp(pcommon.NewTimestampFromTime(tsUnix)) - dps4.At(0).Attributes().PutStr("k0", "v0") - dps4.At(0).Attributes().PutStr("k1", "v1") - dps4.At(0).SetIntValue(int64Val) - ms.AppendEmpty().SetName("cumulative_no_value") - ms.At(4).SetEmptySum().SetIsMonotonic(true) - dps5 := ms.At(4).Sum().DataPoints() - dps5.AppendEmpty().SetTimestamp(pcommon.NewTimestampFromTime(tsUnix)) - return md - }, - wantLines: []string{ - "cumulative_double_no_dims " + expectedDobuleValStr + " " + expectedUnixSecsStr, - "cumulative_int_no_dims " + expectedInt64ValStr + " " + expectedUnixSecsStr, - "cumulative_double_with_dims;k0=v0;k1=v1 " + expectedDobuleValStr + " " + expectedUnixSecsStr, - "cumulative_int_with_dims;k0=v0;k1=v1 " + expectedInt64ValStr + " " + expectedUnixSecsStr, - }, - }, - { - name: "histogram", - metricsDataFn: func() pmetric.Metrics { - md := pmetric.NewMetrics() - ms := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() - ms.AppendEmpty().SetName("distrib") - ms.At(0).SetEmptyHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - dp := ms.At(0).SetEmptyHistogram().DataPoints().AppendEmpty() - dp.SetTimestamp(pcommon.NewTimestampFromTime(tsUnix)) - dp.Attributes().PutStr("k0", "v0") - dp.Attributes().PutStr("k1", "v1") - dp.SetCount(distributionCount) - dp.SetSum(distributionSum) - dp.ExplicitBounds().FromRaw(distributionBounds) - dp.BucketCounts().FromRaw(distributionCounts) - return 
md - }, - wantLines: expectedDistributionLines( - "distrib", ";k0=v0;k1=v1", expectedUnixSecsStr, - distributionSum, - distributionCount, - distributionBounds, - distributionCounts), - }, - { - name: "summary", - metricsDataFn: func() pmetric.Metrics { - md := pmetric.NewMetrics() - ms := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() - ms.AppendEmpty().SetName("summary") - dp := ms.At(0).SetEmptySummary().DataPoints().AppendEmpty() - dp.SetTimestamp(pcommon.NewTimestampFromTime(tsUnix)) - dp.Attributes().PutStr("k0", "v0") - dp.Attributes().PutStr("k1", "v1") - dp.SetCount(summaryCount) - dp.SetSum(summarySum) - for i := range summaryQuantiles { - qv := dp.QuantileValues().AppendEmpty() - qv.SetQuantile(summaryQuantiles[i] / 100) - qv.SetValue(summaryQuantileValues[i]) - } - return md - }, - wantLines: expectedSummaryLines( - "summary", ";k0=v0;k1=v1", expectedUnixSecsStr, - summarySum, - summaryCount, - summaryQuantiles, - summaryQuantileValues), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotLines := metricDataToPlaintext(tt.metricsDataFn()) - got := strings.Split(gotLines, "\n") - got = got[:len(got)-1] - assert.Len(t, got, len(tt.wantLines)+tt.wantExtraLinesCount) - assert.Subset(t, tt.wantLines, got) - }) - } -} - -func expectedDistributionLines( - metricName string, - tags string, - timestampStr string, - sum float64, - count uint64, - bounds []float64, - counts []uint64, -) []string { - var lines []string - lines = append(lines, - metricName+".count"+tags+" "+formatInt64(int64(count))+" "+timestampStr, - metricName+tags+" "+formatFloatForLabel(sum)+" "+timestampStr, - metricName+".bucket"+tags+";upper_bound=inf "+formatInt64(int64(counts[len(bounds)]))+" "+timestampStr, - ) - for i, bound := range bounds { - lines = append(lines, - metricName+".bucket"+tags+";upper_bound="+formatFloatForLabel(bound)+" "+formatInt64(int64(counts[i]))+" "+timestampStr) - } - - return lines -} - -func 
expectedSummaryLines( - metricName string, - tags string, - timestampStr string, - sum float64, - count uint64, - summaryQuantiles []float64, - summaryQuantileValues []float64, -) []string { - var lines []string - lines = append(lines, - metricName+".count"+tags+" "+formatInt64(int64(count))+" "+timestampStr, - metricName+tags+" "+formatFloatForValue(sum)+" "+timestampStr, - ) - for i := range summaryQuantiles { - lines = append(lines, - metricName+".quantile"+tags+";quantile="+formatFloatForLabel(summaryQuantiles[i])+" "+formatFloatForValue(summaryQuantileValues[i])+" "+timestampStr) - } - return lines -} - -func BenchmarkConsumeMetricsDefault(b *testing.B) { - md := generateSmallBatch() - - b.ReportAllocs() - for b.Loop() { - assert.Len(b, metricDataToPlaintext(md), 62) - } -} diff --git a/exporter/carbonexporter/testdata/config.yaml b/exporter/carbonexporter/testdata/config.yaml deleted file mode 100644 index a0d330fbf5954..0000000000000 --- a/exporter/carbonexporter/testdata/config.yaml +++ /dev/null @@ -1,24 +0,0 @@ -carbon: -# by default it will export to localhost:2003 using tcp -carbon/allsettings: - # use endpoint to specify alternative destinations for the exporter, - # the default is localhost:2003 - endpoint: localhost:8080 - max_idle_conns: 15 - # timeout is the maximum duration allowed to connecting and sending the - # data to the Carbon/Graphite backend. - # The default is 5 seconds. 
- timeout: 10s - sending_queue: - enabled: true - num_consumers: 2 - queue_size: 10 - retry_on_failure: - enabled: true - initial_interval: 10s - randomization_factor: 0.7 - multiplier: 3.14 - max_interval: 60s - max_elapsed_time: 10m - resource_to_telemetry_conversion: - enabled: true diff --git a/exporter/elasticsearchexporter/integrationtest/go.mod b/exporter/elasticsearchexporter/integrationtest/go.mod index 48d2ccb3ad031..d19b31215ca5a 100644 --- a/exporter/elasticsearchexporter/integrationtest/go.mod +++ b/exporter/elasticsearchexporter/integrationtest/go.mod @@ -314,8 +314,6 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperre replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/datadogreceiver => ../../../receiver/datadogreceiver -replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter => ../../carbonexporter - replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/splunkhecexporter => ../../splunkhecexporter replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter => ../../prometheusexporter diff --git a/internal/tidylist/tidylist.txt b/internal/tidylist/tidylist.txt index 22955514f6e81..648c5a75fba89 100644 --- a/internal/tidylist/tidylist.txt +++ b/internal/tidylist/tidylist.txt @@ -8,9 +8,6 @@ pkg/ottl connector/routingconnector internal/pdatautil connector/spanmetricsconnector -internal/common -pkg/resourcetotelemetry -exporter/carbonexporter internal/grpcutil internal/sharedcomponent receiver/otelarrowreceiver @@ -20,6 +17,8 @@ receiver/otelarrowreceiver internal/otelarrow exporter/otelarrowexporter receiver/otelarrowreceiver +internal/common +pkg/resourcetotelemetry pkg/translator/prometheus pkg/translator/prometheusremotewrite exporter/prometheusremotewriteexporter diff --git a/reports/distributions/contrib.yaml b/reports/distributions/contrib.yaml index 2661429cfd286..04432f4181d51 100644 --- 
a/reports/distributions/contrib.yaml +++ b/reports/distributions/contrib.yaml @@ -26,7 +26,6 @@ components: - azuredataexplorer - azuremonitor - bmchelix - - carbon - cassandra - clickhouse - coralogix diff --git a/testbed/datasenders/carbon.go b/testbed/datasenders/carbon.go deleted file mode 100644 index 785c6a305c47a..0000000000000 --- a/testbed/datasenders/carbon.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package datasenders // import "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/datasenders" - -import ( - "context" - "fmt" - "time" - - "go.opentelemetry.io/collector/config/confignet" - "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/exporter/exportertest" - "go.uber.org/zap" - - "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter" - "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/testbed" -) - -// CarbonDataSender implements MetricDataSender for Carbon metrics protocol. -type CarbonDataSender struct { - testbed.DataSenderBase - consumer.Metrics -} - -// Ensure CarbonDataSender implements MetricDataSenderOld. -var _ testbed.MetricDataSender = (*CarbonDataSender)(nil) - -// NewCarbonDataSender creates a new Carbon metric protocol sender that will send -// to the specified port after Start is called. -func NewCarbonDataSender(port int) *CarbonDataSender { - return &CarbonDataSender{ - DataSenderBase: testbed.DataSenderBase{ - Port: port, - Host: testbed.DefaultHost, - }, - } -} - -// Start the sender. 
-func (cs *CarbonDataSender) Start() error { - factory := carbonexporter.NewFactory() - cfg := &carbonexporter.Config{ - TCPAddrConfig: confignet.TCPAddrConfig{ - Endpoint: cs.GetEndpoint().String(), - }, - TimeoutSettings: exporterhelper.TimeoutConfig{ - Timeout: 5 * time.Second, - }, - } - params := exportertest.NewNopSettings(factory.Type()) - params.Logger = zap.L() - - exporter, err := factory.CreateMetrics(context.Background(), params, cfg) - if err != nil { - return err - } - - cs.Metrics = exporter - return nil -} - -// GenConfigYAMLStr returns receiver config for the agent. -func (cs *CarbonDataSender) GenConfigYAMLStr() string { - // Note that this generates a receiver config for agent. - return fmt.Sprintf(` - carbon: - endpoint: %s`, cs.GetEndpoint()) -} - -// ProtocolName returns protocol name as it is specified in Collector config. -func (*CarbonDataSender) ProtocolName() string { - return "carbon" -} diff --git a/testbed/go.mod b/testbed/go.mod index bb82316f15d91..5b69d3a21118f 100644 --- a/testbed/go.mod +++ b/testbed/go.mod @@ -7,7 +7,6 @@ require ( github.com/jaegertracing/jaeger-idl v0.6.0 github.com/open-telemetry/opentelemetry-collector-contrib/connector/routingconnector v0.140.1 github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.140.1 - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter v0.140.1 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/otelarrowexporter v0.140.1 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.140.1 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.140.1 @@ -432,8 +431,6 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/connector/span replace github.com/open-telemetry/opentelemetry-collector-contrib/connector/routingconnector => ../connector/routingconnector -replace 
github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter => ../exporter/carbonexporter - replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter => ../exporter/prometheusexporter replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter => ../exporter/prometheusremotewriteexporter diff --git a/testbed/stabilitytests/metric_test.go b/testbed/stabilitytests/metric_test.go index a16abba531bbc..f49ac5a8e47b0 100644 --- a/testbed/stabilitytests/metric_test.go +++ b/testbed/stabilitytests/metric_test.go @@ -30,23 +30,6 @@ func TestStabilityMetricsOTLP(t *testing.T) { ) } -func TestStabilityMetricsCarbon(t *testing.T) { - scenarios.Scenario10kItemsPerSecond( - t, - datasenders.NewCarbonDataSender(testutil.GetAvailablePort(t)), - datareceivers.NewCarbonDataReceiver(testutil.GetAvailablePort(t)), - testbed.ResourceSpec{ - ExpectedMaxCPU: 237, - ExpectedMaxRAM: 120, - ResourceCheckPeriod: resourceCheckPeriod, - }, - contribPerfResultsSummary, - nil, - nil, - nil, - ) -} - func TestStabilityMetricsSignalFx(t *testing.T) { scenarios.Scenario10kItemsPerSecond( t, diff --git a/testbed/tests/metric_test.go b/testbed/tests/metric_test.go index 6409b02bc6f85..c45092051edeb 100644 --- a/testbed/tests/metric_test.go +++ b/testbed/tests/metric_test.go @@ -28,15 +28,6 @@ func TestMetric10kDPS(t *testing.T) { resourceSpec testbed.ResourceSpec skipMessage string }{ - { - name: "Carbon", - sender: datasenders.NewCarbonDataSender(testutil.GetAvailablePort(t)), - receiver: datareceivers.NewCarbonDataReceiver(testutil.GetAvailablePort(t)), - resourceSpec: testbed.ResourceSpec{ - ExpectedMaxCPU: 237, - ExpectedMaxRAM: 150, - }, - }, { name: "OTLP", sender: testbed.NewOTLPMetricDataSender(testbed.DefaultHost, testutil.GetAvailablePort(t)), diff --git a/versions.yaml b/versions.yaml index afcce7e163f8d..1dbb2c4ad4fc4 100644 --- a/versions.yaml +++ b/versions.yaml @@ -39,7 +39,6 @@ 
module-sets: - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/azuredataexplorerexporter - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/azuremonitorexporter - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/bmchelixexporter - - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/cassandraexporter - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/clickhouseexporter - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/coralogixexporter From c69a8038494c310bf932dd335263c92203fc9474 Mon Sep 17 00:00:00 2001 From: Khushi Jain Date: Thu, 27 Nov 2025 13:37:27 +0530 Subject: [PATCH 25/41] [chore][connector/failoverconnector] Fix typo in logsrouter (#44515) #### Description This PR fixes a typo on LogsRouter --- connector/failoverconnector/logs.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/connector/failoverconnector/logs.go b/connector/failoverconnector/logs.go index 9cfa0a8761a8a..2f82848daede3 100644 --- a/connector/failoverconnector/logs.go +++ b/connector/failoverconnector/logs.go @@ -98,7 +98,7 @@ func newLogsToLogs(set connector.Settings, cfg component.Config, logs consumer.L config := cfg.(*Config) lr, ok := logs.(connector.LogsRouterAndConsumer) if !ok { - return nil, errors.New("consumer is not of type MetricsRouter") + return nil, errors.New("consumer is not of type LogsRouter") } failover, err := newLogsRouter(lr.Consumer, config) From 7d759d157a7e8e1f5c0110c26333e9306c407f59 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Natalie=20Pierce=20=F0=9F=8F=B3=EF=B8=8F=E2=80=8D=E2=9A=A7?= =?UTF-8?q?=EF=B8=8F?= <32561617+naptalie@users.noreply.github.com> Date: Thu, 27 Nov 2025 03:10:49 -0500 Subject: [PATCH 26/41] [chore][receiver/prometheusreceiver] Increase test coverage for internal packages (#44292) ## Description This PR increases test coverage for the 
prometheusreceiver's internal/targetallocator package as part of ongoing efforts to improve code quality and test coverage across the OpenTelemetry Collector Contrib repository. **Fixes #44183** ## Changes #### Enhanced `config_test.go` with comprehensive validation tests: - **TestConfigValidate_InvalidEndpoint**: Comprehensive validation of endpoint and collectorID fields including: - Malformed URLs - Empty endpoint values - Empty collectorID values - Variable interpolation in collectorID (e.g., `${POD_NAME}`) - **TestConvertTLSVersion**: Tests TLS version string conversion for all supported versions: - TLS 1.0, 1.1, 1.2, 1.3 - Invalid version numbers (2.0) - Invalid format strings - **TestCheckTLSConfig**: Tests TLS configuration validation: - Empty configuration (valid) - Valid certificate and key file combinations - Non-existent certificate files - Non-existent key files #### Created `manager_unit_test.go` with unit tests for core functionality: - **TestNewManager**: Validates proper initialization of Manager struct, including: - Configuration storage - Shutdown channel initialization - Initial scrape configs - Native histogram flag - **TestManagerShutdown**: Ensures proper shutdown behavior: - Shutdown channel closure - Graceful goroutine termination - Log message validation - **TestInstantiateShard**: Tests SHARD environment variable substitution: - SHARD variable set - SHARD variable unset (defaults to 0) - Multiple SHARD placeholders in single string - No SHARD placeholders - **TestGetScrapeConfigsResponse_InvalidURL**: Validates error handling for malformed URLs - **TestGetScrapeConfigsResponse_HTTPError**: Tests handling of HTTP error responses (500 status codes) - **TestGetScrapeConfigsResponse_InvalidYAML**: Verifies error handling for invalid YAML responses from target allocator **Technical Note**: The new test file `manager_unit_test.go` was created separately from `manager_test.go` because the existing file uses `//go:build !race` tag, which excludes 
tests when running with the race detector. The makefile's `test-with-cover` target uses race detection, so tests in `manager_test.go` would not be executed during coverage runs. The new file runs with race detection enabled, ensuring tests are properly executed during CI/CD pipelines. ## Testing All tests pass successfully with race detection enabled: ```bash cd receiver/prometheusreceiver/internal/targetallocator go test -race -cover PASS coverage: 77.4% of statements ``` ## Checklist - [x] Tests added for new functionality - [x] Code follows OpenTelemetry coding standards - [x] All tests pass with race detection enabled - [x] Commit messages follow conventional commit format with proper component tags --------- Co-authored-by: Claude --- .../internal/targetallocator/config_test.go | 161 +++ .../internal/targetallocator/manager.go | 12 +- .../internal/targetallocator/manager_test.go | 1024 ++--------------- .../targetallocator/manager_unsafe_test.go | 969 ++++++++++++++++ 4 files changed, 1264 insertions(+), 902 deletions(-) create mode 100644 receiver/prometheusreceiver/internal/targetallocator/manager_unsafe_test.go diff --git a/receiver/prometheusreceiver/internal/targetallocator/config_test.go b/receiver/prometheusreceiver/internal/targetallocator/config_test.go index 92f45874224e6..b64a356768401 100644 --- a/receiver/prometheusreceiver/internal/targetallocator/config_test.go +++ b/receiver/prometheusreceiver/internal/targetallocator/config_test.go @@ -67,3 +67,164 @@ func TestPromHTTPClientConfigValidateMain(t *testing.T) { cfg.BearerTokenFile = filepath.Join("testdata", "dummy-tls-key-file") require.Error(t, xconfmap.Validate(cfg)) } + +func TestConfigValidate_InvalidEndpoint(t *testing.T) { + tests := []struct { + name string + endpoint string + collectorID string + expectError bool + }{ + { + name: "valid config", + endpoint: "http://localhost:8080", + collectorID: "collector-1", + expectError: false, + }, + { + name: "invalid endpoint - malformed", + 
endpoint: "://invalid", + collectorID: "collector-1", + expectError: true, + }, + { + name: "invalid endpoint - empty", + endpoint: "", + collectorID: "collector-1", + expectError: true, + }, + { + name: "invalid collectorID - empty", + endpoint: "http://localhost:8080", + collectorID: "", + expectError: true, + }, + { + name: "invalid collectorID - contains variable", + endpoint: "http://localhost:8080", + collectorID: "${POD_NAME}", + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := &Config{ + CollectorID: tt.collectorID, + } + cfg.Endpoint = tt.endpoint + err := xconfmap.Validate(cfg) + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestConvertTLSVersion(t *testing.T) { + tests := []struct { + name string + version string + expectError bool + }{ + { + name: "TLS 1.2", + version: "1.2", + expectError: false, + }, + { + name: "TLS 1.3", + version: "1.3", + expectError: false, + }, + { + name: "TLS 1.0", + version: "1.0", + expectError: false, + }, + { + name: "TLS 1.1", + version: "1.1", + expectError: false, + }, + { + name: "invalid version", + version: "2.0", + expectError: true, + }, + { + name: "invalid format", + version: "invalid", + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := convertTLSVersion(tt.version) + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestCheckTLSConfig(t *testing.T) { + tests := []struct { + name string + certFile string + keyFile string + expectError bool + }{ + { + name: "empty config", + certFile: "", + keyFile: "", + expectError: false, + }, + { + name: "valid cert and key", + certFile: filepath.Join("testdata", "dummy-tls-cert-file"), + keyFile: filepath.Join("testdata", "dummy-tls-key-file"), + expectError: false, + }, + { + name: "invalid cert file", + certFile: "nonexistent-cert.pem", + keyFile: 
filepath.Join("testdata", "dummy-tls-key-file"), + expectError: true, + }, + { + name: "invalid key file", + certFile: filepath.Join("testdata", "dummy-tls-cert-file"), + keyFile: "nonexistent-key.pem", + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tlsConfig := promConfig.TLSConfig{ + CertFile: tt.certFile, + KeyFile: tt.keyFile, + } + cfg := &Config{ + CollectorID: "collector-1", + HTTPScrapeConfig: &PromHTTPClientConfig{ + TLSConfig: tlsConfig, + }, + } + cfg.Endpoint = "http://localhost:8080" + err := xconfmap.Validate(cfg) + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/receiver/prometheusreceiver/internal/targetallocator/manager.go b/receiver/prometheusreceiver/internal/targetallocator/manager.go index 97aeb410880d1..c0fc97fc55908 100644 --- a/receiver/prometheusreceiver/internal/targetallocator/manager.go +++ b/receiver/prometheusreceiver/internal/targetallocator/manager.go @@ -14,6 +14,7 @@ import ( "net/url" "os" "sort" + "sync" "sync/atomic" "syscall" "time" @@ -40,6 +41,7 @@ type Manager struct { scrapeManager *scrape.Manager discoveryManager *discovery.Manager enableNativeHistograms bool + wg sync.WaitGroup // configUpdateCount tracks how many times the config has changed, for // testing. @@ -92,7 +94,9 @@ func (m *Manager) Start(ctx context.Context, host component.Host, sm *scrape.Man if err != nil { return err } + m.wg.Add(1) go func() { + defer m.wg.Done() targetAllocatorIntervalTicker := time.NewTicker(m.cfg.Interval) for { select { @@ -115,6 +119,7 @@ func (m *Manager) Start(ctx context.Context, host component.Host, sm *scrape.Man func (m *Manager) Shutdown() { close(m.shutdown) + m.wg.Wait() } // sync request jobs from targetAllocator and update underlying receiver, if the response does not match the provided compareHash. 
@@ -239,7 +244,7 @@ func getScrapeConfigsResponse(httpClient *http.Client, baseURL string) (map[stri } jobToScrapeConfig := map[string]*promconfig.ScrapeConfig{} - envReplacedBody := instantiateShard(body) + envReplacedBody := instantiateShard(body, os.LookupEnv) err = yaml.Unmarshal(envReplacedBody, &jobToScrapeConfig) if err != nil { return nil, err @@ -252,8 +257,9 @@ func getScrapeConfigsResponse(httpClient *http.Client, baseURL string) (map[stri } // instantiateShard inserts the SHARD environment variable in the returned configuration -func instantiateShard(body []byte) []byte { - shard, ok := os.LookupEnv("SHARD") +func instantiateShard(body []byte, lookup func(string) (string, bool)) []byte { + shard, ok := lookup("SHARD") + if !ok { shard = "0" } diff --git a/receiver/prometheusreceiver/internal/targetallocator/manager_test.go b/receiver/prometheusreceiver/internal/targetallocator/manager_test.go index cffa11074c263..4afc9a0afc7eb 100644 --- a/receiver/prometheusreceiver/internal/targetallocator/manager_test.go +++ b/receiver/prometheusreceiver/internal/targetallocator/manager_test.go @@ -1,969 +1,195 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -//go:build !race - package targetallocator import ( - "context" - "encoding/base64" - "encoding/json" "net/http" "net/http/httptest" - "net/url" - "strings" - "sync" - "sync/atomic" "testing" "time" "github.com/prometheus/client_golang/prometheus" - commonconfig "github.com/prometheus/common/config" - "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" promconfig "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery" - promHTTP "github.com/prometheus/prometheus/discovery/http" - "github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/scrape" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" - 
"go.opentelemetry.io/collector/config/configopaque" - "go.opentelemetry.io/collector/config/configtls" "go.opentelemetry.io/collector/receiver/receivertest" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "go.uber.org/zap/zaptest/observer" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/internal/metadata" ) -type mockTargetAllocator struct { - mu sync.Mutex // mu protects the fields below. - endpoints map[string][]mockTargetAllocatorResponse - accessIndex map[string]*atomic.Int32 - wg *sync.WaitGroup - srv *httptest.Server - waitIndex map[string]int -} - -type mockTargetAllocatorResponse struct { - code int - data []byte -} - -type mockTargetAllocatorResponseRaw struct { - code int - data any -} - -type hTTPSDResponse struct { - Targets []string `json:"targets"` - Labels map[model.LabelName]model.LabelValue `json:"labels"` -} - -type expectedMetricRelabelConfigTestResult struct { - JobName string - MetricRelabelRegex relabel.Regexp -} - -type expectedTestResultJobMap struct { - Targets []string - Labels model.LabelSet - MetricRelabelConfig *expectedMetricRelabelConfigTestResult - ScrapeFallbackProtocol promconfig.ScrapeProtocol -} - -type expectedTestResult struct { - empty bool - jobMap map[string]expectedTestResultJobMap -} - -func (mta *mockTargetAllocator) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - mta.mu.Lock() - defer mta.mu.Unlock() - - iptr, ok := mta.accessIndex[req.URL.Path] - if !ok { - rw.WriteHeader(http.StatusNotFound) - return +func TestNewManager(t *testing.T) { + cfg := &Config{ + Interval: 30 * time.Second, + CollectorID: "test-collector", } - index := int(iptr.Load()) - iptr.Add(1) - pages := mta.endpoints[req.URL.Path] - if index >= len(pages) { - rw.WriteHeader(http.StatusNotFound) - return + promCfg := &promconfig.Config{ + ScrapeConfigs: []*promconfig.ScrapeConfig{ + {JobName: "test-job"}, + }, } - rw.Header().Set("Content-Type", "application/json") - rw.WriteHeader(pages[index].code) - 
_, _ = rw.Write(pages[index].data) - // release WaitGroup after all endpoints have been hit by Prometheus SD once. After that we will call them manually - wait := mta.waitIndex[req.URL.Path] - if index == wait { - mta.wg.Done() - } -} + manager := NewManager(receivertest.NewNopSettings(metadata.Type), cfg, promCfg, true) -func (mta *mockTargetAllocator) Start() { - mta.srv.Start() + assert.NotNil(t, manager) + assert.Equal(t, cfg, manager.cfg) + assert.Equal(t, promCfg, manager.promCfg) + assert.True(t, manager.enableNativeHistograms) + assert.NotNil(t, manager.shutdown) + assert.NotNil(t, manager.configUpdateCount) + assert.Len(t, manager.initialScrapeConfigs, 1) } -func (mta *mockTargetAllocator) Stop() { - mta.srv.Close() -} - -func transformTAResponseMap(rawResponses map[string][]mockTargetAllocatorResponseRaw) (map[string][]mockTargetAllocatorResponse, map[string]*atomic.Int32, error) { - responsesMap := make(map[string][]mockTargetAllocatorResponse) - responsesIndexMap := make(map[string]*atomic.Int32) - for path, responsesRaw := range rawResponses { - var responses []mockTargetAllocatorResponse - for _, responseRaw := range responsesRaw { - respBodyBytes, err := json.Marshal(responseRaw.data) - if err != nil { - return nil, nil, err - } - responses = append(responses, mockTargetAllocatorResponse{ - code: responseRaw.code, - data: respBodyBytes, - }) +func TestManagerShutdown(t *testing.T) { + // Create a mock target allocator server + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + if r.URL.Path == "/scrape_configs" { + _, _ = w.Write([]byte("{}")) } - responsesMap[path] = responses - - v := &atomic.Int32{} - responsesIndexMap[path] = v - } - return responsesMap, responsesIndexMap, nil -} - -func setupMockTargetAllocator(responses Responses) (*mockTargetAllocator, error) { - responsesMap, responsesIndexMap, err := transformTAResponseMap(responses.responses) - if err != nil { - 
return nil, err - } + })) + defer server.Close() - mockTA := &mockTargetAllocator{ - endpoints: responsesMap, - accessIndex: responsesIndexMap, - waitIndex: responses.releaserMap, - wg: &sync.WaitGroup{}, + cfg := &Config{ + Interval: 100 * time.Millisecond, + CollectorID: "test-collector", } - mockTA.srv = httptest.NewUnstartedServer(mockTA) - mockTA.wg.Add(len(responsesMap)) + cfg.Endpoint = server.URL + promCfg, err := promconfig.Load("", nil) + require.NoError(t, err) - return mockTA, nil -} + // Create a logger with observer to capture logs + core, logs := observer.New(zapcore.InfoLevel) + logger := zap.New(core) + settings := receivertest.NewNopSettings(metadata.Type) + settings.Logger = logger -func labelSetTargetsToList(sets []model.LabelSet) []string { - result := make([]string, len(sets)) - for i, set := range sets { - address := set["__address__"] - result[i] = string(address) - } - return result -} + manager := NewManager(settings, cfg, promCfg, false) -type Responses struct { - releaserMap map[string]int - responses map[string][]mockTargetAllocatorResponseRaw -} + // Start the manager so the goroutine is running + ctx := t.Context() -func TestGetScrapeConfigHash(t *testing.T) { - jobToScrapeConfig1 := map[string]*promconfig.ScrapeConfig{} - jobToScrapeConfig1["job1"] = &promconfig.ScrapeConfig{ - JobName: "job1", - HonorTimestamps: true, - ScrapeInterval: model.Duration(30 * time.Second), - ScrapeTimeout: model.Duration(30 * time.Second), - MetricsPath: "/metrics", - Scheme: "http", - RelabelConfigs: []*relabel.Config{ - { - SourceLabels: model.LabelNames{"a"}, - TargetLabel: "d", - Action: relabel.KeepEqual, - }, - }, - } - jobToScrapeConfig1["job2"] = &promconfig.ScrapeConfig{ - JobName: "job2", - HonorTimestamps: true, - ScrapeInterval: model.Duration(30 * time.Second), - ScrapeTimeout: model.Duration(30 * time.Second), - MetricsPath: "/metrics", - Scheme: "http", - RelabelConfigs: []*relabel.Config{ - { - SourceLabels: model.LabelNames{"a"}, - 
TargetLabel: "d", - Action: relabel.KeepEqual, - }, - }, - } - jobToScrapeConfig1["job3"] = &promconfig.ScrapeConfig{ - JobName: "job3", - HonorTimestamps: true, - ScrapeInterval: model.Duration(30 * time.Second), - ScrapeTimeout: model.Duration(30 * time.Second), - MetricsPath: "/metrics", - Scheme: "http", - RelabelConfigs: []*relabel.Config{ - { - SourceLabels: model.LabelNames{"a"}, - TargetLabel: "d", - Action: relabel.KeepEqual, - }, - }, - } - jobToScrapeConfig2 := map[string]*promconfig.ScrapeConfig{} - jobToScrapeConfig2["job2"] = &promconfig.ScrapeConfig{ - JobName: "job2", - HonorTimestamps: true, - ScrapeInterval: model.Duration(30 * time.Second), - ScrapeTimeout: model.Duration(30 * time.Second), - MetricsPath: "/metrics", - Scheme: "http", - RelabelConfigs: []*relabel.Config{ - { - SourceLabels: model.LabelNames{"a"}, - TargetLabel: "d", - Action: relabel.KeepEqual, - }, - }, - } - jobToScrapeConfig2["job1"] = &promconfig.ScrapeConfig{ - JobName: "job1", - HonorTimestamps: true, - ScrapeInterval: model.Duration(30 * time.Second), - ScrapeTimeout: model.Duration(30 * time.Second), - MetricsPath: "/metrics", - Scheme: "http", - RelabelConfigs: []*relabel.Config{ - { - SourceLabels: model.LabelNames{"a"}, - TargetLabel: "d", - Action: relabel.KeepEqual, - }, - }, - } - jobToScrapeConfig2["job3"] = &promconfig.ScrapeConfig{ - JobName: "job3", - HonorTimestamps: true, - ScrapeInterval: model.Duration(30 * time.Second), - ScrapeTimeout: model.Duration(30 * time.Second), - MetricsPath: "/metrics", - Scheme: "http", - RelabelConfigs: []*relabel.Config{ - { - SourceLabels: model.LabelNames{"a"}, - TargetLabel: "d", - Action: relabel.KeepEqual, - }, - }, - } + // Initialize Prometheus managers using the same pattern as manager_test.go + promLogger := promslog.NewNopLogger() + reg := prometheus.NewRegistry() + sdMetrics, err := discovery.RegisterSDMetrics(reg, discovery.NewRefreshMetrics(reg)) + require.NoError(t, err) + discoveryManager := 
discovery.NewManager(ctx, promLogger, reg, sdMetrics) + require.NotNil(t, discoveryManager) - hash1, err := getScrapeConfigHash(jobToScrapeConfig1) + scrapeManager, err := scrape.NewManager(&scrape.Options{}, promLogger, nil, nil, reg) require.NoError(t, err) + defer scrapeManager.Stop() - hash2, err := getScrapeConfigHash(jobToScrapeConfig2) + err = manager.Start(ctx, componenttest.NewNopHost(), scrapeManager, discoveryManager) require.NoError(t, err) - assert.Equal(t, hash1, hash2) + // Shutdown the manager + manager.Shutdown() + + // Wait for the shutdown to complete with a timeout + require.Eventually(t, func() bool { + // Check if the log message "Stopping target allocator" was logged + logEntries := logs.FilterMessage("Stopping target allocator") + return logEntries.Len() > 0 + }, 5*time.Second, 50*time.Millisecond, "expected shutdown log message") } -func TestTargetAllocatorJobRetrieval(t *testing.T) { - for _, tc := range []struct { - desc string - responses Responses - cfg *Config - want expectedTestResult +func TestInstantiateShard(t *testing.T) { + tests := []struct { + name string + input []byte + envVar string + expected []byte + setEnv bool }{ { - desc: "default", - responses: Responses{ - responses: map[string][]mockTargetAllocatorResponseRaw{ - "/scrape_configs": { - mockTargetAllocatorResponseRaw{code: 200, data: map[string]map[string]any{ - "job1": { - "job_name": "job1", - "scrape_interval": "30s", - "scrape_timeout": "30s", - "scrape_protocols": []string{"OpenMetricsText1.0.0", "OpenMetricsText0.0.1", "PrometheusText0.0.4"}, - "metrics_path": "/metrics", - "scheme": "http", - "relabel_configs": nil, - "metric_relabel_configs": nil, - "fallback_scrape_protocol": promconfig.PrometheusText1_0_0, - }, - "job2": { - "job_name": "job2", - "scrape_interval": "30s", - "scrape_timeout": "30s", - "scrape_protocols": []string{"OpenMetricsText1.0.0", "OpenMetricsText0.0.1", "PrometheusText0.0.4"}, - "metrics_path": "/metrics", - "scheme": "http", - 
"relabel_configs": nil, - "metric_relabel_configs": nil, - }, - }}, - }, - "/jobs/job1/targets": { - mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ - { - Targets: []string{"localhost:9090", "10.0.10.3:9100", "10.0.10.4:9100", "10.0.10.5:9100"}, - Labels: map[model.LabelName]model.LabelValue{ - "__meta_datacenter": "london", - "__meta_prometheus_job": "node", - }, - }, - }}, - mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ - { - Targets: []string{"localhost:9090", "10.0.10.3:9100", "10.0.10.4:9100", "10.0.10.5:9100"}, - Labels: map[model.LabelName]model.LabelValue{ - "__meta_datacenter": "london", - "__meta_prometheus_job": "node", - }, - }, - }}, - }, - "/jobs/job2/targets": { - mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ - { - Targets: []string{"10.0.40.2:9100", "10.0.40.3:9100"}, - Labels: map[model.LabelName]model.LabelValue{ - "__meta_datacenter": "london", - "__meta_prometheus_job": "alertmanager", - }, - }, - }}, - mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ - { - Targets: []string{"10.0.40.2:9100", "10.0.40.3:9100"}, - Labels: map[model.LabelName]model.LabelValue{ - "__meta_datacenter": "london", - "__meta_prometheus_job": "alertmanager", - }, - }, - }}, - }, - }, - }, - cfg: &Config{ - Interval: 10 * time.Second, - CollectorID: "collector-1", - HTTPSDConfig: &PromHTTPSDConfig{ - HTTPClientConfig: commonconfig.HTTPClientConfig{ - BasicAuth: &commonconfig.BasicAuth{ - Username: "user", - Password: "aPassword", - }, - }, - RefreshInterval: model.Duration(60 * time.Second), - }, - }, - want: expectedTestResult{ - empty: false, - jobMap: map[string]expectedTestResultJobMap{ - "job1": { - Targets: []string{"localhost:9090", "10.0.10.3:9100", "10.0.10.4:9100", "10.0.10.5:9100"}, - Labels: map[model.LabelName]model.LabelValue{ - "__meta_datacenter": "london", - "__meta_prometheus_job": "node", - }, - ScrapeFallbackProtocol: promconfig.PrometheusText1_0_0, - }, - "job2": { - Targets: 
[]string{"10.0.40.2:9100", "10.0.40.3:9100"}, - Labels: map[model.LabelName]model.LabelValue{ - "__meta_datacenter": "london", - "__meta_prometheus_job": "alertmanager", - }, - ScrapeFallbackProtocol: promconfig.PrometheusText0_0_4, // Tests default value - }, - }, - }, - }, - { - desc: "update labels and targets", - responses: Responses{ - responses: map[string][]mockTargetAllocatorResponseRaw{ - "/scrape_configs": { - mockTargetAllocatorResponseRaw{code: 200, data: map[string]map[string]any{ - "job1": { - "job_name": "job1", - "scrape_interval": "30s", - "scrape_timeout": "30s", - "scrape_protocols": []string{"OpenMetricsText1.0.0", "OpenMetricsText0.0.1", "PrometheusText0.0.4"}, - "metrics_path": "/metrics", - "scheme": "http", - "relabel_configs": nil, - "metric_relabel_configs": nil, - }, - "job2": { - "job_name": "job2", - "scrape_interval": "30s", - "scrape_timeout": "30s", - "scrape_protocols": []string{"OpenMetricsText1.0.0", "OpenMetricsText0.0.1", "PrometheusText0.0.4"}, - "metrics_path": "/metrics", - "scheme": "http", - "relabel_configs": nil, - "metric_relabel_configs": nil, - }, - }}, - }, - "/jobs/job1/targets": { - mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ - { - Targets: []string{"localhost:9090", "10.0.10.3:9100", "10.0.10.4:9100", "10.0.10.5:9100"}, - Labels: map[model.LabelName]model.LabelValue{ - "__meta_datacenter": "london", - "__meta_prometheus_job": "node", - }, - }, - }}, - mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ - { - Targets: []string{"localhost:9090"}, - Labels: map[model.LabelName]model.LabelValue{ - "__meta_datacenter": "london", - "__meta_prometheus_job": "node", - "test": "aTest", - }, - }, - }}, - }, - "/jobs/job2/targets": { - mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ - { - Targets: []string{"10.0.40.3:9100"}, - Labels: map[model.LabelName]model.LabelValue{ - "__meta_datacenter": "london", - "__meta_prometheus_job": "alertmanager", - }, - }, - }}, - 
mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ - { - Targets: []string{"10.0.40.2:9100", "10.0.40.3:9100"}, - Labels: map[model.LabelName]model.LabelValue{ - "__meta_datacenter": "london", - }, - }, - }}, - }, - }, - }, - cfg: &Config{ - Interval: 10 * time.Second, - CollectorID: "collector-1", - HTTPSDConfig: &PromHTTPSDConfig{ - HTTPClientConfig: commonconfig.HTTPClientConfig{}, - RefreshInterval: model.Duration(60 * time.Second), - }, - }, - want: expectedTestResult{ - empty: false, - jobMap: map[string]expectedTestResultJobMap{ - "job1": { - Targets: []string{"localhost:9090"}, - Labels: map[model.LabelName]model.LabelValue{ - "__meta_datacenter": "london", - "__meta_prometheus_job": "node", - "test": "aTest", - }, - }, - "job2": { - Targets: []string{"10.0.40.2:9100", "10.0.40.3:9100"}, - Labels: map[model.LabelName]model.LabelValue{ - "__meta_datacenter": "london", - }, - }, - }, - }, + name: "shard environment variable set", + input: []byte("replica-$(SHARD)"), + envVar: "5", + expected: []byte("replica-5"), + setEnv: true, }, { - desc: "update job list", - responses: Responses{ - releaserMap: map[string]int{ - "/scrape_configs": 1, - }, - responses: map[string][]mockTargetAllocatorResponseRaw{ - "/scrape_configs": { - mockTargetAllocatorResponseRaw{code: 200, data: map[string]map[string]any{ - "job1": { - "job_name": "job1", - "scrape_interval": "30s", - "scrape_timeout": "30s", - "scrape_protocols": []string{"OpenMetricsText1.0.0", "OpenMetricsText0.0.1", "PrometheusText0.0.4"}, - "metrics_path": "/metrics", - "scheme": "http", - "relabel_configs": nil, - "metric_relabel_configs": nil, - }, - "job2": { - "job_name": "job2", - "scrape_interval": "30s", - "scrape_timeout": "30s", - "scrape_protocols": []string{"OpenMetricsText1.0.0", "OpenMetricsText0.0.1", "PrometheusText0.0.4"}, - "metrics_path": "/metrics", - "scheme": "http", - "relabel_configs": nil, - "metric_relabel_configs": nil, - }, - }}, - mockTargetAllocatorResponseRaw{code: 
200, data: map[string]map[string]any{ - "job1": { - "job_name": "job1", - "scrape_interval": "30s", - "scrape_timeout": "30s", - "scrape_protocols": []string{"OpenMetricsText1.0.0", "OpenMetricsText0.0.1", "PrometheusText0.0.4"}, - "metrics_path": "/metrics", - "scheme": "http", - "relabel_configs": nil, - "metric_relabel_configs": nil, - }, - "job3": { - "job_name": "job3", - "scrape_interval": "30s", - "scrape_timeout": "30s", - "scrape_protocols": []string{"OpenMetricsText1.0.0", "OpenMetricsText0.0.1", "PrometheusText0.0.4"}, - "metrics_path": "/metrics", - "scheme": "http", - "relabel_configs": nil, - "metric_relabel_configs": nil, - }, - }}, - }, - "/jobs/job1/targets": { - mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ - { - Targets: []string{"localhost:9090"}, - Labels: map[model.LabelName]model.LabelValue{ - "__meta_datacenter": "london", - "__meta_prometheus_job": "node", - }, - }, - }}, - mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ - { - Targets: []string{"localhost:9090"}, - Labels: map[model.LabelName]model.LabelValue{ - "__meta_datacenter": "london", - "__meta_prometheus_job": "node", - }, - }, - }}, - }, - "/jobs/job3/targets": { - mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ - { - Targets: []string{"10.0.40.3:9100"}, - Labels: map[model.LabelName]model.LabelValue{ - "__meta_datacenter": "london", - "__meta_prometheus_job": "alertmanager", - }, - }, - }}, - mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ - { - Targets: []string{"10.0.40.3:9100"}, - Labels: map[model.LabelName]model.LabelValue{ - "__meta_datacenter": "london", - "__meta_prometheus_job": "alertmanager", - }, - }, - }}, - }, - }, - }, - cfg: &Config{ - Interval: 10 * time.Second, - CollectorID: "collector-1", - HTTPSDConfig: &PromHTTPSDConfig{ - HTTPClientConfig: commonconfig.HTTPClientConfig{}, - RefreshInterval: model.Duration(60 * time.Second), - }, - }, - want: expectedTestResult{ - empty: false, - 
jobMap: map[string]expectedTestResultJobMap{ - "job1": { - Targets: []string{"localhost:9090"}, - Labels: map[model.LabelName]model.LabelValue{ - "__meta_datacenter": "london", - "__meta_prometheus_job": "node", - }, - }, - "job3": { - Targets: []string{"10.0.40.3:9100"}, - Labels: map[model.LabelName]model.LabelValue{ - "__meta_datacenter": "london", - "__meta_prometheus_job": "alertmanager", - }, - }, - }, - }, + name: "shard environment variable not set", + input: []byte("replica-$(SHARD)"), + expected: []byte("replica-0"), + setEnv: false, }, { - desc: "endpoint is not reachable", - responses: Responses{ - releaserMap: map[string]int{ - "/scrape_configs": 1, // we are too fast if we ignore the first wait a tick - }, - responses: map[string][]mockTargetAllocatorResponseRaw{ - "/scrape_configs": { - mockTargetAllocatorResponseRaw{code: 404, data: map[string]map[string]any{}}, - mockTargetAllocatorResponseRaw{code: 404, data: map[string]map[string]any{}}, - }, - }, - }, - cfg: &Config{ - Interval: 50 * time.Millisecond, - CollectorID: "collector-1", - HTTPSDConfig: &PromHTTPSDConfig{ - HTTPClientConfig: commonconfig.HTTPClientConfig{}, - RefreshInterval: model.Duration(60 * time.Second), - }, - }, - want: expectedTestResult{ - empty: true, - jobMap: map[string]expectedTestResultJobMap{}, - }, + name: "multiple shard placeholders", + input: []byte("job-$(SHARD)-replica-$(SHARD)"), + envVar: "3", + expected: []byte("job-3-replica-3"), + setEnv: true, }, { - desc: "update metric relabel config regex", - responses: Responses{ - releaserMap: map[string]int{ - "/scrape_configs": 1, - }, - responses: map[string][]mockTargetAllocatorResponseRaw{ - "/scrape_configs": { - mockTargetAllocatorResponseRaw{code: 200, data: map[string]map[string]any{ - "job1": { - "job_name": "job1", - "scrape_interval": "30s", - "scrape_timeout": "30s", - "scrape_protocols": []string{"OpenMetricsText1.0.0", "OpenMetricsText0.0.1", "PrometheusText0.0.4"}, - "metrics_path": "/metrics", - 
"scheme": "http", - "metric_relabel_configs": []map[string]string{ - { - "separator": ";", - "regex": "regex1", - "action": "keep", - }, - }, - }, - }}, - mockTargetAllocatorResponseRaw{code: 200, data: map[string]map[string]any{ - "job1": { - "job_name": "job1", - "scrape_interval": "30s", - "scrape_timeout": "30s", - "scrape_protocols": []string{"OpenMetricsText1.0.0", "OpenMetricsText0.0.1", "PrometheusText0.0.4"}, - "metrics_path": "/metrics", - "scheme": "http", - "metric_relabel_configs": []map[string]string{ - { - "separator": ";", - "regex": "regex2", - "action": "keep", - }, - }, - }, - }}, - }, - "/jobs/job1/targets": { - mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ - { - Targets: []string{"localhost:9090"}, - Labels: map[model.LabelName]model.LabelValue{ - "__meta_datacenter": "london", - "__meta_prometheus_job": "node", - }, - }, - }}, - mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ - { - Targets: []string{"localhost:9090"}, - Labels: map[model.LabelName]model.LabelValue{ - "__meta_datacenter": "london", - "__meta_prometheus_job": "node", - }, - }, - }}, - }, - }, - }, - cfg: &Config{ - Interval: 10 * time.Second, - CollectorID: "collector-1", - HTTPSDConfig: &PromHTTPSDConfig{ - HTTPClientConfig: commonconfig.HTTPClientConfig{}, - RefreshInterval: model.Duration(60 * time.Second), - }, - }, - want: expectedTestResult{ - empty: false, - jobMap: map[string]expectedTestResultJobMap{ - "job1": { - Targets: []string{"localhost:9090"}, - Labels: map[model.LabelName]model.LabelValue{ - "__meta_datacenter": "london", - "__meta_prometheus_job": "node", - }, - MetricRelabelConfig: &expectedMetricRelabelConfigTestResult{ - JobName: "job1", - MetricRelabelRegex: relabel.MustNewRegexp("regex2"), - }, - }, - }, - }, + name: "no shard placeholder", + input: []byte("static-config"), + expected: []byte("static-config"), + setEnv: false, }, - } { - t.Run(tc.desc, func(t *testing.T) { - ctx := t.Context() - - allocator, err := 
setupMockTargetAllocator(tc.responses) - require.NoError(t, err, "Failed to create allocator", tc.responses) - - allocator.Start() - defer allocator.Stop() - - tc.cfg.Endpoint = allocator.srv.URL // set service URL with the automatic generated one - scrapeManager, discoveryManager := initPrometheusManagers(ctx, t) - - baseCfg, err := promconfig.Load("", nil) - require.NoError(t, err) - manager := NewManager(receivertest.NewNopSettings(metadata.Type), tc.cfg, baseCfg, false) - require.NoError(t, manager.Start(ctx, componenttest.NewNopHost(), scrapeManager, discoveryManager)) - - allocator.wg.Wait() - - providers := discoveryManager.Providers() - if tc.want.empty { - // if no base config is supplied and the job retrieval fails then no configuration should be found - require.Empty(t, providers) - return - } - - require.NotNil(t, providers) - - for _, provider := range providers { - require.IsType(t, &promHTTP.Discovery{}, provider.Discoverer()) - httpDiscovery := provider.Discoverer().(*promHTTP.Discovery) - refresh, err := httpDiscovery.Refresh(ctx) - require.NoError(t, err) - - // are http configs applied? - sdConfig := provider.Config().(*promHTTP.SDConfig) - require.Equal(t, tc.cfg.HTTPSDConfig.HTTPClientConfig, sdConfig.HTTPClientConfig) - - for _, group := range refresh { - found := false - for job, s := range tc.want.jobMap { - // find correct job to compare to. - if !strings.Contains(group.Source, job) { - continue - } - // compare targets - require.Equal(t, s.Targets, labelSetTargetsToList(group.Targets)) - - // compare labels and add __meta_url as this label gets automatically added by the SD. - // which is identical to the source url - s.Labels["__meta_url"] = model.LabelValue(sdConfig.URL) - require.Equal(t, s.Labels, group.Labels) - - // The manager may not be done processing the Refresh call by the - // time we check the value of the ScrapeConfig. 
- require.Eventually(t, func() bool { - v := manager.configUpdateCount.Load() - return v >= int64(len(tc.responses.responses["/scrape_configs"])) - }, 5*time.Second, 250*time.Millisecond) - - if s.MetricRelabelConfig != nil { - for _, sc := range manager.promCfg.ScrapeConfigs { - if sc.JobName == s.MetricRelabelConfig.JobName { - for _, mc := range sc.MetricRelabelConfigs { - require.Equal(t, s.MetricRelabelConfig.MetricRelabelRegex, mc.Regex) - } - } - } - } - - if s.ScrapeFallbackProtocol != "" { - for _, sc := range manager.promCfg.ScrapeConfigs { - if sc.JobName == job { - require.Equal(t, sc.ScrapeFallbackProtocol, s.ScrapeFallbackProtocol) - } - } - } + } - found = true - } - require.True(t, found, "Returned job is not defined in expected values", group) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var lookup func(string) (string, bool) + if tt.setEnv { + lookup = func(_ string) (string, bool) { + return tt.envVar, true + } + } else { + lookup = func(_ string) (string, bool) { + return "", false } } + + result := instantiateShard(tt.input, lookup) + assert.Equal(t, tt.expected, result) }) } } -func TestConfigureSDHTTPClientConfigFromTA(t *testing.T) { - ta := &Config{} - ta.TLS = configtls.ClientConfig{ - InsecureSkipVerify: true, - ServerName: "test.server", - Config: configtls.Config{ - CAFile: "/path/to/ca", - CertFile: "/path/to/cert", - KeyFile: "/path/to/key", - CAPem: configopaque.String(base64.StdEncoding.EncodeToString([]byte("test-ca"))), - CertPem: configopaque.String(base64.StdEncoding.EncodeToString([]byte("test-cert"))), - KeyPem: configopaque.String(base64.StdEncoding.EncodeToString([]byte("test-key"))), - MinVersion: "1.2", - MaxVersion: "1.3", - }, - } - ta.ProxyURL = "http://proxy.test" - - httpSD := &promHTTP.SDConfig{RefreshInterval: model.Duration(30 * time.Second)} +func TestGetScrapeConfigsResponse_InvalidURL(t *testing.T) { + httpClient := &http.Client{} + invalidURL := "://invalid-url" - err := 
configureSDHTTPClientConfigFromTA(httpSD, ta) - - assert.NoError(t, err) - - assert.False(t, httpSD.HTTPClientConfig.FollowRedirects) - assert.True(t, httpSD.HTTPClientConfig.TLSConfig.InsecureSkipVerify) - assert.Equal(t, "test.server", httpSD.HTTPClientConfig.TLSConfig.ServerName) - assert.Equal(t, "/path/to/ca", httpSD.HTTPClientConfig.TLSConfig.CAFile) - assert.Equal(t, "/path/to/cert", httpSD.HTTPClientConfig.TLSConfig.CertFile) - assert.Equal(t, "/path/to/key", httpSD.HTTPClientConfig.TLSConfig.KeyFile) - assert.Equal(t, "test-ca", httpSD.HTTPClientConfig.TLSConfig.CA) - assert.Equal(t, "test-cert", httpSD.HTTPClientConfig.TLSConfig.Cert) - assert.Equal(t, commonconfig.Secret("test-key"), httpSD.HTTPClientConfig.TLSConfig.Key) - assert.Equal(t, commonconfig.TLSVersions["TLS12"], httpSD.HTTPClientConfig.TLSConfig.MinVersion) - assert.Equal(t, commonconfig.TLSVersions["TLS13"], httpSD.HTTPClientConfig.TLSConfig.MaxVersion) - - parsedProxyURL, _ := url.Parse("http://proxy.test") - assert.Equal(t, commonconfig.URL{URL: parsedProxyURL}, httpSD.HTTPClientConfig.ProxyURL) + _, err := getScrapeConfigsResponse(httpClient, invalidURL) + assert.Error(t, err) +} - // Test case with empty TargetAllocator - emptyTA := &Config{} - emptyHTTPSD := &promHTTP.SDConfig{RefreshInterval: model.Duration(30 * time.Second)} +func TestGetScrapeConfigsResponse_HTTPError(t *testing.T) { + // Create a test server that returns an error + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + })) + defer server.Close() - err = configureSDHTTPClientConfigFromTA(emptyHTTPSD, emptyTA) + httpClient := &http.Client{} + config, err := getScrapeConfigsResponse(httpClient, server.URL) + // This should succeed in reading the response but return empty config assert.NoError(t, err) + assert.Empty(t, config) } -func TestManagerSyncWithInitialScrapeConfigs(t *testing.T) { - ctx := t.Context() - initialScrapeConfigs 
:= []*promconfig.ScrapeConfig{ - { - JobName: "job1", - HonorTimestamps: true, - ScrapeInterval: model.Duration(30 * time.Second), - ScrapeTimeout: model.Duration(30 * time.Second), - MetricsPath: "/metrics", - Scheme: "http", - }, - { - JobName: "job2", - HonorTimestamps: true, - ScrapeInterval: model.Duration(30 * time.Second), - ScrapeTimeout: model.Duration(30 * time.Second), - MetricsPath: "/metrics", - Scheme: "http", - }, - } - - // Mock target allocator response - mockResponse := Responses{ - responses: map[string][]mockTargetAllocatorResponseRaw{ - "/scrape_configs": { - mockTargetAllocatorResponseRaw{code: 200, data: map[string]map[string]any{ - "job1": { - "job_name": "job3", - "scrape_interval": "30s", - "scrape_timeout": "30s", - "scrape_protocols": []string{"OpenMetricsText1.0.0", "OpenMetricsText0.0.1", "PrometheusText0.0.4"}, - "metrics_path": "/metrics", - "scheme": "http", - "relabel_configs": nil, - "metric_relabel_configs": nil, - }, - }}, - }, - "/jobs/job1/targets": { - mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ - { - Targets: []string{"localhost:9090", "10.0.10.3:9100", "10.0.10.4:9100", "10.0.10.5:9100"}, - Labels: map[model.LabelName]model.LabelValue{ - "__meta_datacenter": "london", - "__meta_prometheus_job": "node", - }, - }, - }}, - mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ - { - Targets: []string{"localhost:9090", "10.0.10.3:9100", "10.0.10.4:9100", "10.0.10.5:9100"}, - Labels: map[model.LabelName]model.LabelValue{ - "__meta_datacenter": "london", - "__meta_prometheus_job": "node", - }, - }, - }}, - }, - }, - } - - cfg := &Config{ - Interval: 10 * time.Second, - CollectorID: "collector-1", - HTTPSDConfig: &PromHTTPSDConfig{ - HTTPClientConfig: commonconfig.HTTPClientConfig{}, - RefreshInterval: model.Duration(60 * time.Second), - }, - } - - allocator, err := setupMockTargetAllocator(mockResponse) - require.NoError(t, err, "Failed to create allocator") +func 
TestGetScrapeConfigsResponse_InvalidYAML(t *testing.T) { + // Create a test server that returns invalid YAML + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte("invalid: yaml: content: [[[")) + })) + defer server.Close() - allocator.Start() - defer allocator.Stop() - cfg.Endpoint = allocator.srv.URL // set service URL with the automatic generated one - scrapeManager, discoveryManager := initPrometheusManagers(ctx, t) + httpClient := &http.Client{} - baseCfg, err := promconfig.Load("", nil) - require.NoError(t, err) - baseCfg.ScrapeConfigs = initialScrapeConfigs - manager := NewManager(receivertest.NewNopSettings(metadata.Type), cfg, baseCfg, false) - require.NoError(t, manager.Start(ctx, componenttest.NewNopHost(), scrapeManager, discoveryManager)) - - allocator.wg.Wait() - - providers := discoveryManager.Providers() - - require.NotNil(t, providers) - require.Len(t, providers, 2) - require.IsType(t, &promHTTP.Discovery{}, providers[1].Discoverer()) - - require.Len(t, manager.promCfg.ScrapeConfigs, 3) - require.Equal(t, "job1", manager.promCfg.ScrapeConfigs[0].JobName) - require.Equal(t, "job2", manager.promCfg.ScrapeConfigs[1].JobName) - require.Equal(t, "job3", manager.promCfg.ScrapeConfigs[2].JobName) -} - -func initPrometheusManagers(ctx context.Context, t *testing.T) (*scrape.Manager, *discovery.Manager) { - logger := promslog.NewNopLogger() - reg := prometheus.NewRegistry() - sdMetrics, err := discovery.RegisterSDMetrics(reg, discovery.NewRefreshMetrics(reg)) - require.NoError(t, err) - discoveryManager := discovery.NewManager(ctx, logger, reg, sdMetrics) - require.NotNil(t, discoveryManager) - - scrapeManager, err := scrape.NewManager(&scrape.Options{}, logger, nil, nil, reg) - require.NoError(t, err) - return scrapeManager, discoveryManager + _, err := getScrapeConfigsResponse(httpClient, server.URL) + assert.Error(t, err) } diff --git 
a/receiver/prometheusreceiver/internal/targetallocator/manager_unsafe_test.go b/receiver/prometheusreceiver/internal/targetallocator/manager_unsafe_test.go new file mode 100644 index 0000000000000..cffa11074c263 --- /dev/null +++ b/receiver/prometheusreceiver/internal/targetallocator/manager_unsafe_test.go @@ -0,0 +1,969 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build !race + +package targetallocator + +import ( + "context" + "encoding/base64" + "encoding/json" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + commonconfig "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" + promconfig "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery" + promHTTP "github.com/prometheus/prometheus/discovery/http" + "github.com/prometheus/prometheus/model/relabel" + "github.com/prometheus/prometheus/scrape" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configopaque" + "go.opentelemetry.io/collector/config/configtls" + "go.opentelemetry.io/collector/receiver/receivertest" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/internal/metadata" +) + +type mockTargetAllocator struct { + mu sync.Mutex // mu protects the fields below. 
+ endpoints map[string][]mockTargetAllocatorResponse + accessIndex map[string]*atomic.Int32 + wg *sync.WaitGroup + srv *httptest.Server + waitIndex map[string]int +} + +type mockTargetAllocatorResponse struct { + code int + data []byte +} + +type mockTargetAllocatorResponseRaw struct { + code int + data any +} + +type hTTPSDResponse struct { + Targets []string `json:"targets"` + Labels map[model.LabelName]model.LabelValue `json:"labels"` +} + +type expectedMetricRelabelConfigTestResult struct { + JobName string + MetricRelabelRegex relabel.Regexp +} + +type expectedTestResultJobMap struct { + Targets []string + Labels model.LabelSet + MetricRelabelConfig *expectedMetricRelabelConfigTestResult + ScrapeFallbackProtocol promconfig.ScrapeProtocol +} + +type expectedTestResult struct { + empty bool + jobMap map[string]expectedTestResultJobMap +} + +func (mta *mockTargetAllocator) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + mta.mu.Lock() + defer mta.mu.Unlock() + + iptr, ok := mta.accessIndex[req.URL.Path] + if !ok { + rw.WriteHeader(http.StatusNotFound) + return + } + index := int(iptr.Load()) + iptr.Add(1) + pages := mta.endpoints[req.URL.Path] + if index >= len(pages) { + rw.WriteHeader(http.StatusNotFound) + return + } + rw.Header().Set("Content-Type", "application/json") + rw.WriteHeader(pages[index].code) + _, _ = rw.Write(pages[index].data) + + // release WaitGroup after all endpoints have been hit by Prometheus SD once. 
After that we will call them manually + wait := mta.waitIndex[req.URL.Path] + if index == wait { + mta.wg.Done() + } +} + +func (mta *mockTargetAllocator) Start() { + mta.srv.Start() +} + +func (mta *mockTargetAllocator) Stop() { + mta.srv.Close() +} + +func transformTAResponseMap(rawResponses map[string][]mockTargetAllocatorResponseRaw) (map[string][]mockTargetAllocatorResponse, map[string]*atomic.Int32, error) { + responsesMap := make(map[string][]mockTargetAllocatorResponse) + responsesIndexMap := make(map[string]*atomic.Int32) + for path, responsesRaw := range rawResponses { + var responses []mockTargetAllocatorResponse + for _, responseRaw := range responsesRaw { + respBodyBytes, err := json.Marshal(responseRaw.data) + if err != nil { + return nil, nil, err + } + responses = append(responses, mockTargetAllocatorResponse{ + code: responseRaw.code, + data: respBodyBytes, + }) + } + responsesMap[path] = responses + + v := &atomic.Int32{} + responsesIndexMap[path] = v + } + return responsesMap, responsesIndexMap, nil +} + +func setupMockTargetAllocator(responses Responses) (*mockTargetAllocator, error) { + responsesMap, responsesIndexMap, err := transformTAResponseMap(responses.responses) + if err != nil { + return nil, err + } + + mockTA := &mockTargetAllocator{ + endpoints: responsesMap, + accessIndex: responsesIndexMap, + waitIndex: responses.releaserMap, + wg: &sync.WaitGroup{}, + } + mockTA.srv = httptest.NewUnstartedServer(mockTA) + mockTA.wg.Add(len(responsesMap)) + + return mockTA, nil +} + +func labelSetTargetsToList(sets []model.LabelSet) []string { + result := make([]string, len(sets)) + for i, set := range sets { + address := set["__address__"] + result[i] = string(address) + } + return result +} + +type Responses struct { + releaserMap map[string]int + responses map[string][]mockTargetAllocatorResponseRaw +} + +func TestGetScrapeConfigHash(t *testing.T) { + jobToScrapeConfig1 := map[string]*promconfig.ScrapeConfig{} + jobToScrapeConfig1["job1"] = 
&promconfig.ScrapeConfig{ + JobName: "job1", + HonorTimestamps: true, + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(30 * time.Second), + MetricsPath: "/metrics", + Scheme: "http", + RelabelConfigs: []*relabel.Config{ + { + SourceLabels: model.LabelNames{"a"}, + TargetLabel: "d", + Action: relabel.KeepEqual, + }, + }, + } + jobToScrapeConfig1["job2"] = &promconfig.ScrapeConfig{ + JobName: "job2", + HonorTimestamps: true, + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(30 * time.Second), + MetricsPath: "/metrics", + Scheme: "http", + RelabelConfigs: []*relabel.Config{ + { + SourceLabels: model.LabelNames{"a"}, + TargetLabel: "d", + Action: relabel.KeepEqual, + }, + }, + } + jobToScrapeConfig1["job3"] = &promconfig.ScrapeConfig{ + JobName: "job3", + HonorTimestamps: true, + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(30 * time.Second), + MetricsPath: "/metrics", + Scheme: "http", + RelabelConfigs: []*relabel.Config{ + { + SourceLabels: model.LabelNames{"a"}, + TargetLabel: "d", + Action: relabel.KeepEqual, + }, + }, + } + jobToScrapeConfig2 := map[string]*promconfig.ScrapeConfig{} + jobToScrapeConfig2["job2"] = &promconfig.ScrapeConfig{ + JobName: "job2", + HonorTimestamps: true, + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(30 * time.Second), + MetricsPath: "/metrics", + Scheme: "http", + RelabelConfigs: []*relabel.Config{ + { + SourceLabels: model.LabelNames{"a"}, + TargetLabel: "d", + Action: relabel.KeepEqual, + }, + }, + } + jobToScrapeConfig2["job1"] = &promconfig.ScrapeConfig{ + JobName: "job1", + HonorTimestamps: true, + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(30 * time.Second), + MetricsPath: "/metrics", + Scheme: "http", + RelabelConfigs: []*relabel.Config{ + { + SourceLabels: model.LabelNames{"a"}, + TargetLabel: "d", + Action: relabel.KeepEqual, + }, + }, + } + 
jobToScrapeConfig2["job3"] = &promconfig.ScrapeConfig{ + JobName: "job3", + HonorTimestamps: true, + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(30 * time.Second), + MetricsPath: "/metrics", + Scheme: "http", + RelabelConfigs: []*relabel.Config{ + { + SourceLabels: model.LabelNames{"a"}, + TargetLabel: "d", + Action: relabel.KeepEqual, + }, + }, + } + + hash1, err := getScrapeConfigHash(jobToScrapeConfig1) + require.NoError(t, err) + + hash2, err := getScrapeConfigHash(jobToScrapeConfig2) + require.NoError(t, err) + + assert.Equal(t, hash1, hash2) +} + +func TestTargetAllocatorJobRetrieval(t *testing.T) { + for _, tc := range []struct { + desc string + responses Responses + cfg *Config + want expectedTestResult + }{ + { + desc: "default", + responses: Responses{ + responses: map[string][]mockTargetAllocatorResponseRaw{ + "/scrape_configs": { + mockTargetAllocatorResponseRaw{code: 200, data: map[string]map[string]any{ + "job1": { + "job_name": "job1", + "scrape_interval": "30s", + "scrape_timeout": "30s", + "scrape_protocols": []string{"OpenMetricsText1.0.0", "OpenMetricsText0.0.1", "PrometheusText0.0.4"}, + "metrics_path": "/metrics", + "scheme": "http", + "relabel_configs": nil, + "metric_relabel_configs": nil, + "fallback_scrape_protocol": promconfig.PrometheusText1_0_0, + }, + "job2": { + "job_name": "job2", + "scrape_interval": "30s", + "scrape_timeout": "30s", + "scrape_protocols": []string{"OpenMetricsText1.0.0", "OpenMetricsText0.0.1", "PrometheusText0.0.4"}, + "metrics_path": "/metrics", + "scheme": "http", + "relabel_configs": nil, + "metric_relabel_configs": nil, + }, + }}, + }, + "/jobs/job1/targets": { + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + { + Targets: []string{"localhost:9090", "10.0.10.3:9100", "10.0.10.4:9100", "10.0.10.5:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "node", + }, + }, + }}, + 
mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + { + Targets: []string{"localhost:9090", "10.0.10.3:9100", "10.0.10.4:9100", "10.0.10.5:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "node", + }, + }, + }}, + }, + "/jobs/job2/targets": { + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + { + Targets: []string{"10.0.40.2:9100", "10.0.40.3:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "alertmanager", + }, + }, + }}, + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + { + Targets: []string{"10.0.40.2:9100", "10.0.40.3:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "alertmanager", + }, + }, + }}, + }, + }, + }, + cfg: &Config{ + Interval: 10 * time.Second, + CollectorID: "collector-1", + HTTPSDConfig: &PromHTTPSDConfig{ + HTTPClientConfig: commonconfig.HTTPClientConfig{ + BasicAuth: &commonconfig.BasicAuth{ + Username: "user", + Password: "aPassword", + }, + }, + RefreshInterval: model.Duration(60 * time.Second), + }, + }, + want: expectedTestResult{ + empty: false, + jobMap: map[string]expectedTestResultJobMap{ + "job1": { + Targets: []string{"localhost:9090", "10.0.10.3:9100", "10.0.10.4:9100", "10.0.10.5:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "node", + }, + ScrapeFallbackProtocol: promconfig.PrometheusText1_0_0, + }, + "job2": { + Targets: []string{"10.0.40.2:9100", "10.0.40.3:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "alertmanager", + }, + ScrapeFallbackProtocol: promconfig.PrometheusText0_0_4, // Tests default value + }, + }, + }, + }, + { + desc: "update labels and targets", + responses: Responses{ + responses: map[string][]mockTargetAllocatorResponseRaw{ 
+ "/scrape_configs": { + mockTargetAllocatorResponseRaw{code: 200, data: map[string]map[string]any{ + "job1": { + "job_name": "job1", + "scrape_interval": "30s", + "scrape_timeout": "30s", + "scrape_protocols": []string{"OpenMetricsText1.0.0", "OpenMetricsText0.0.1", "PrometheusText0.0.4"}, + "metrics_path": "/metrics", + "scheme": "http", + "relabel_configs": nil, + "metric_relabel_configs": nil, + }, + "job2": { + "job_name": "job2", + "scrape_interval": "30s", + "scrape_timeout": "30s", + "scrape_protocols": []string{"OpenMetricsText1.0.0", "OpenMetricsText0.0.1", "PrometheusText0.0.4"}, + "metrics_path": "/metrics", + "scheme": "http", + "relabel_configs": nil, + "metric_relabel_configs": nil, + }, + }}, + }, + "/jobs/job1/targets": { + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + { + Targets: []string{"localhost:9090", "10.0.10.3:9100", "10.0.10.4:9100", "10.0.10.5:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "node", + }, + }, + }}, + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + { + Targets: []string{"localhost:9090"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "node", + "test": "aTest", + }, + }, + }}, + }, + "/jobs/job2/targets": { + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + { + Targets: []string{"10.0.40.3:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "alertmanager", + }, + }, + }}, + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + { + Targets: []string{"10.0.40.2:9100", "10.0.40.3:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + }, + }, + }}, + }, + }, + }, + cfg: &Config{ + Interval: 10 * time.Second, + CollectorID: "collector-1", + HTTPSDConfig: &PromHTTPSDConfig{ + HTTPClientConfig: commonconfig.HTTPClientConfig{}, + 
RefreshInterval: model.Duration(60 * time.Second), + }, + }, + want: expectedTestResult{ + empty: false, + jobMap: map[string]expectedTestResultJobMap{ + "job1": { + Targets: []string{"localhost:9090"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "node", + "test": "aTest", + }, + }, + "job2": { + Targets: []string{"10.0.40.2:9100", "10.0.40.3:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + }, + }, + }, + }, + }, + { + desc: "update job list", + responses: Responses{ + releaserMap: map[string]int{ + "/scrape_configs": 1, + }, + responses: map[string][]mockTargetAllocatorResponseRaw{ + "/scrape_configs": { + mockTargetAllocatorResponseRaw{code: 200, data: map[string]map[string]any{ + "job1": { + "job_name": "job1", + "scrape_interval": "30s", + "scrape_timeout": "30s", + "scrape_protocols": []string{"OpenMetricsText1.0.0", "OpenMetricsText0.0.1", "PrometheusText0.0.4"}, + "metrics_path": "/metrics", + "scheme": "http", + "relabel_configs": nil, + "metric_relabel_configs": nil, + }, + "job2": { + "job_name": "job2", + "scrape_interval": "30s", + "scrape_timeout": "30s", + "scrape_protocols": []string{"OpenMetricsText1.0.0", "OpenMetricsText0.0.1", "PrometheusText0.0.4"}, + "metrics_path": "/metrics", + "scheme": "http", + "relabel_configs": nil, + "metric_relabel_configs": nil, + }, + }}, + mockTargetAllocatorResponseRaw{code: 200, data: map[string]map[string]any{ + "job1": { + "job_name": "job1", + "scrape_interval": "30s", + "scrape_timeout": "30s", + "scrape_protocols": []string{"OpenMetricsText1.0.0", "OpenMetricsText0.0.1", "PrometheusText0.0.4"}, + "metrics_path": "/metrics", + "scheme": "http", + "relabel_configs": nil, + "metric_relabel_configs": nil, + }, + "job3": { + "job_name": "job3", + "scrape_interval": "30s", + "scrape_timeout": "30s", + "scrape_protocols": []string{"OpenMetricsText1.0.0", "OpenMetricsText0.0.1", "PrometheusText0.0.4"}, + 
"metrics_path": "/metrics", + "scheme": "http", + "relabel_configs": nil, + "metric_relabel_configs": nil, + }, + }}, + }, + "/jobs/job1/targets": { + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + { + Targets: []string{"localhost:9090"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "node", + }, + }, + }}, + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + { + Targets: []string{"localhost:9090"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "node", + }, + }, + }}, + }, + "/jobs/job3/targets": { + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + { + Targets: []string{"10.0.40.3:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "alertmanager", + }, + }, + }}, + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + { + Targets: []string{"10.0.40.3:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "alertmanager", + }, + }, + }}, + }, + }, + }, + cfg: &Config{ + Interval: 10 * time.Second, + CollectorID: "collector-1", + HTTPSDConfig: &PromHTTPSDConfig{ + HTTPClientConfig: commonconfig.HTTPClientConfig{}, + RefreshInterval: model.Duration(60 * time.Second), + }, + }, + want: expectedTestResult{ + empty: false, + jobMap: map[string]expectedTestResultJobMap{ + "job1": { + Targets: []string{"localhost:9090"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "node", + }, + }, + "job3": { + Targets: []string{"10.0.40.3:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "alertmanager", + }, + }, + }, + }, + }, + { + desc: "endpoint is not reachable", + responses: Responses{ + releaserMap: map[string]int{ + "/scrape_configs": 1, 
// we are too fast if we ignore the first wait a tick + }, + responses: map[string][]mockTargetAllocatorResponseRaw{ + "/scrape_configs": { + mockTargetAllocatorResponseRaw{code: 404, data: map[string]map[string]any{}}, + mockTargetAllocatorResponseRaw{code: 404, data: map[string]map[string]any{}}, + }, + }, + }, + cfg: &Config{ + Interval: 50 * time.Millisecond, + CollectorID: "collector-1", + HTTPSDConfig: &PromHTTPSDConfig{ + HTTPClientConfig: commonconfig.HTTPClientConfig{}, + RefreshInterval: model.Duration(60 * time.Second), + }, + }, + want: expectedTestResult{ + empty: true, + jobMap: map[string]expectedTestResultJobMap{}, + }, + }, + { + desc: "update metric relabel config regex", + responses: Responses{ + releaserMap: map[string]int{ + "/scrape_configs": 1, + }, + responses: map[string][]mockTargetAllocatorResponseRaw{ + "/scrape_configs": { + mockTargetAllocatorResponseRaw{code: 200, data: map[string]map[string]any{ + "job1": { + "job_name": "job1", + "scrape_interval": "30s", + "scrape_timeout": "30s", + "scrape_protocols": []string{"OpenMetricsText1.0.0", "OpenMetricsText0.0.1", "PrometheusText0.0.4"}, + "metrics_path": "/metrics", + "scheme": "http", + "metric_relabel_configs": []map[string]string{ + { + "separator": ";", + "regex": "regex1", + "action": "keep", + }, + }, + }, + }}, + mockTargetAllocatorResponseRaw{code: 200, data: map[string]map[string]any{ + "job1": { + "job_name": "job1", + "scrape_interval": "30s", + "scrape_timeout": "30s", + "scrape_protocols": []string{"OpenMetricsText1.0.0", "OpenMetricsText0.0.1", "PrometheusText0.0.4"}, + "metrics_path": "/metrics", + "scheme": "http", + "metric_relabel_configs": []map[string]string{ + { + "separator": ";", + "regex": "regex2", + "action": "keep", + }, + }, + }, + }}, + }, + "/jobs/job1/targets": { + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + { + Targets: []string{"localhost:9090"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + 
"__meta_prometheus_job": "node", + }, + }, + }}, + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + { + Targets: []string{"localhost:9090"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "node", + }, + }, + }}, + }, + }, + }, + cfg: &Config{ + Interval: 10 * time.Second, + CollectorID: "collector-1", + HTTPSDConfig: &PromHTTPSDConfig{ + HTTPClientConfig: commonconfig.HTTPClientConfig{}, + RefreshInterval: model.Duration(60 * time.Second), + }, + }, + want: expectedTestResult{ + empty: false, + jobMap: map[string]expectedTestResultJobMap{ + "job1": { + Targets: []string{"localhost:9090"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "node", + }, + MetricRelabelConfig: &expectedMetricRelabelConfigTestResult{ + JobName: "job1", + MetricRelabelRegex: relabel.MustNewRegexp("regex2"), + }, + }, + }, + }, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + ctx := t.Context() + + allocator, err := setupMockTargetAllocator(tc.responses) + require.NoError(t, err, "Failed to create allocator", tc.responses) + + allocator.Start() + defer allocator.Stop() + + tc.cfg.Endpoint = allocator.srv.URL // set service URL with the automatic generated one + scrapeManager, discoveryManager := initPrometheusManagers(ctx, t) + + baseCfg, err := promconfig.Load("", nil) + require.NoError(t, err) + manager := NewManager(receivertest.NewNopSettings(metadata.Type), tc.cfg, baseCfg, false) + require.NoError(t, manager.Start(ctx, componenttest.NewNopHost(), scrapeManager, discoveryManager)) + + allocator.wg.Wait() + + providers := discoveryManager.Providers() + if tc.want.empty { + // if no base config is supplied and the job retrieval fails then no configuration should be found + require.Empty(t, providers) + return + } + + require.NotNil(t, providers) + + for _, provider := range providers { + require.IsType(t, &promHTTP.Discovery{}, 
provider.Discoverer()) + httpDiscovery := provider.Discoverer().(*promHTTP.Discovery) + refresh, err := httpDiscovery.Refresh(ctx) + require.NoError(t, err) + + // are http configs applied? + sdConfig := provider.Config().(*promHTTP.SDConfig) + require.Equal(t, tc.cfg.HTTPSDConfig.HTTPClientConfig, sdConfig.HTTPClientConfig) + + for _, group := range refresh { + found := false + for job, s := range tc.want.jobMap { + // find correct job to compare to. + if !strings.Contains(group.Source, job) { + continue + } + // compare targets + require.Equal(t, s.Targets, labelSetTargetsToList(group.Targets)) + + // compare labels and add __meta_url as this label gets automatically added by the SD. + // which is identical to the source url + s.Labels["__meta_url"] = model.LabelValue(sdConfig.URL) + require.Equal(t, s.Labels, group.Labels) + + // The manager may not be done processing the Refresh call by the + // time we check the value of the ScrapeConfig. + require.Eventually(t, func() bool { + v := manager.configUpdateCount.Load() + return v >= int64(len(tc.responses.responses["/scrape_configs"])) + }, 5*time.Second, 250*time.Millisecond) + + if s.MetricRelabelConfig != nil { + for _, sc := range manager.promCfg.ScrapeConfigs { + if sc.JobName == s.MetricRelabelConfig.JobName { + for _, mc := range sc.MetricRelabelConfigs { + require.Equal(t, s.MetricRelabelConfig.MetricRelabelRegex, mc.Regex) + } + } + } + } + + if s.ScrapeFallbackProtocol != "" { + for _, sc := range manager.promCfg.ScrapeConfigs { + if sc.JobName == job { + require.Equal(t, sc.ScrapeFallbackProtocol, s.ScrapeFallbackProtocol) + } + } + } + + found = true + } + require.True(t, found, "Returned job is not defined in expected values", group) + } + } + }) + } +} + +func TestConfigureSDHTTPClientConfigFromTA(t *testing.T) { + ta := &Config{} + ta.TLS = configtls.ClientConfig{ + InsecureSkipVerify: true, + ServerName: "test.server", + Config: configtls.Config{ + CAFile: "/path/to/ca", + CertFile: 
"/path/to/cert", + KeyFile: "/path/to/key", + CAPem: configopaque.String(base64.StdEncoding.EncodeToString([]byte("test-ca"))), + CertPem: configopaque.String(base64.StdEncoding.EncodeToString([]byte("test-cert"))), + KeyPem: configopaque.String(base64.StdEncoding.EncodeToString([]byte("test-key"))), + MinVersion: "1.2", + MaxVersion: "1.3", + }, + } + ta.ProxyURL = "http://proxy.test" + + httpSD := &promHTTP.SDConfig{RefreshInterval: model.Duration(30 * time.Second)} + + err := configureSDHTTPClientConfigFromTA(httpSD, ta) + + assert.NoError(t, err) + + assert.False(t, httpSD.HTTPClientConfig.FollowRedirects) + assert.True(t, httpSD.HTTPClientConfig.TLSConfig.InsecureSkipVerify) + assert.Equal(t, "test.server", httpSD.HTTPClientConfig.TLSConfig.ServerName) + assert.Equal(t, "/path/to/ca", httpSD.HTTPClientConfig.TLSConfig.CAFile) + assert.Equal(t, "/path/to/cert", httpSD.HTTPClientConfig.TLSConfig.CertFile) + assert.Equal(t, "/path/to/key", httpSD.HTTPClientConfig.TLSConfig.KeyFile) + assert.Equal(t, "test-ca", httpSD.HTTPClientConfig.TLSConfig.CA) + assert.Equal(t, "test-cert", httpSD.HTTPClientConfig.TLSConfig.Cert) + assert.Equal(t, commonconfig.Secret("test-key"), httpSD.HTTPClientConfig.TLSConfig.Key) + assert.Equal(t, commonconfig.TLSVersions["TLS12"], httpSD.HTTPClientConfig.TLSConfig.MinVersion) + assert.Equal(t, commonconfig.TLSVersions["TLS13"], httpSD.HTTPClientConfig.TLSConfig.MaxVersion) + + parsedProxyURL, _ := url.Parse("http://proxy.test") + assert.Equal(t, commonconfig.URL{URL: parsedProxyURL}, httpSD.HTTPClientConfig.ProxyURL) + + // Test case with empty TargetAllocator + emptyTA := &Config{} + emptyHTTPSD := &promHTTP.SDConfig{RefreshInterval: model.Duration(30 * time.Second)} + + err = configureSDHTTPClientConfigFromTA(emptyHTTPSD, emptyTA) + + assert.NoError(t, err) +} + +func TestManagerSyncWithInitialScrapeConfigs(t *testing.T) { + ctx := t.Context() + initialScrapeConfigs := []*promconfig.ScrapeConfig{ + { + JobName: "job1", + 
HonorTimestamps: true, + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(30 * time.Second), + MetricsPath: "/metrics", + Scheme: "http", + }, + { + JobName: "job2", + HonorTimestamps: true, + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(30 * time.Second), + MetricsPath: "/metrics", + Scheme: "http", + }, + } + + // Mock target allocator response + mockResponse := Responses{ + responses: map[string][]mockTargetAllocatorResponseRaw{ + "/scrape_configs": { + mockTargetAllocatorResponseRaw{code: 200, data: map[string]map[string]any{ + "job1": { + "job_name": "job3", + "scrape_interval": "30s", + "scrape_timeout": "30s", + "scrape_protocols": []string{"OpenMetricsText1.0.0", "OpenMetricsText0.0.1", "PrometheusText0.0.4"}, + "metrics_path": "/metrics", + "scheme": "http", + "relabel_configs": nil, + "metric_relabel_configs": nil, + }, + }}, + }, + "/jobs/job1/targets": { + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + { + Targets: []string{"localhost:9090", "10.0.10.3:9100", "10.0.10.4:9100", "10.0.10.5:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "node", + }, + }, + }}, + mockTargetAllocatorResponseRaw{code: 200, data: []hTTPSDResponse{ + { + Targets: []string{"localhost:9090", "10.0.10.3:9100", "10.0.10.4:9100", "10.0.10.5:9100"}, + Labels: map[model.LabelName]model.LabelValue{ + "__meta_datacenter": "london", + "__meta_prometheus_job": "node", + }, + }, + }}, + }, + }, + } + + cfg := &Config{ + Interval: 10 * time.Second, + CollectorID: "collector-1", + HTTPSDConfig: &PromHTTPSDConfig{ + HTTPClientConfig: commonconfig.HTTPClientConfig{}, + RefreshInterval: model.Duration(60 * time.Second), + }, + } + + allocator, err := setupMockTargetAllocator(mockResponse) + require.NoError(t, err, "Failed to create allocator") + + allocator.Start() + defer allocator.Stop() + cfg.Endpoint = allocator.srv.URL // set 
service URL with the automatic generated one + scrapeManager, discoveryManager := initPrometheusManagers(ctx, t) + + baseCfg, err := promconfig.Load("", nil) + require.NoError(t, err) + baseCfg.ScrapeConfigs = initialScrapeConfigs + manager := NewManager(receivertest.NewNopSettings(metadata.Type), cfg, baseCfg, false) + require.NoError(t, manager.Start(ctx, componenttest.NewNopHost(), scrapeManager, discoveryManager)) + + allocator.wg.Wait() + + providers := discoveryManager.Providers() + + require.NotNil(t, providers) + require.Len(t, providers, 2) + require.IsType(t, &promHTTP.Discovery{}, providers[1].Discoverer()) + + require.Len(t, manager.promCfg.ScrapeConfigs, 3) + require.Equal(t, "job1", manager.promCfg.ScrapeConfigs[0].JobName) + require.Equal(t, "job2", manager.promCfg.ScrapeConfigs[1].JobName) + require.Equal(t, "job3", manager.promCfg.ScrapeConfigs[2].JobName) +} + +func initPrometheusManagers(ctx context.Context, t *testing.T) (*scrape.Manager, *discovery.Manager) { + logger := promslog.NewNopLogger() + reg := prometheus.NewRegistry() + sdMetrics, err := discovery.RegisterSDMetrics(reg, discovery.NewRefreshMetrics(reg)) + require.NoError(t, err) + discoveryManager := discovery.NewManager(ctx, logger, reg, sdMetrics) + require.NotNil(t, discoveryManager) + + scrapeManager, err := scrape.NewManager(&scrape.Options{}, logger, nil, nil, reg) + require.NoError(t, err) + return scrapeManager, discoveryManager +} From 0ffdf76aff8f2f63db8a03bf49022725cfa979b1 Mon Sep 17 00:00:00 2001 From: "Mengyi Zhou (bjrara)" Date: Thu, 27 Nov 2025 00:33:40 -0800 Subject: [PATCH 27/41] [receiver/awsxrayreceiver] fix span kind when translating segment with parent ID (#44404) #### Description This PR fixes a translation bug in awsxrayreceiver that when segment is received with parent ID present, it is incorrectly translated to span with `INTERNAL` span kind. 
#### Link to tracking issue Fixes #### Testing Unit test #### Documentation --- .chloggen/awsxrayreceiver-fix.yaml | 27 ++++++++++++ .../aws/xray/testdata/segmentWithParentId.txt | 7 +++ .../internal/translator/translator.go | 20 +++++---- .../internal/translator/translator_test.go | 43 +++++++++++++++++++ 4 files changed, 89 insertions(+), 8 deletions(-) create mode 100644 .chloggen/awsxrayreceiver-fix.yaml create mode 100644 internal/aws/xray/testdata/segmentWithParentId.txt diff --git a/.chloggen/awsxrayreceiver-fix.yaml b/.chloggen/awsxrayreceiver-fix.yaml new file mode 100644 index 0000000000000..919a1ca6f5c15 --- /dev/null +++ b/.chloggen/awsxrayreceiver-fix.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. receiver/filelog) +component: receiver/awsxray + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Fix incorrect span kind when translating X-Ray segment to trace span with parent ID + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [44404] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [user] diff --git a/internal/aws/xray/testdata/segmentWithParentId.txt b/internal/aws/xray/testdata/segmentWithParentId.txt new file mode 100644 index 0000000000000..093f25b2a80cb --- /dev/null +++ b/internal/aws/xray/testdata/segmentWithParentId.txt @@ -0,0 +1,7 @@ +{ + "trace_id": "1-5f187253-6a106696d56b1f4ef9eba2ed", + "id": "5cc4a447f5d4d696", + "name": "segment", + "start_time": 1595437651.680097, + "parent_id": "bda182a644eee9b3" +} \ No newline at end of file diff --git a/receiver/awsxrayreceiver/internal/translator/translator.go b/receiver/awsxrayreceiver/internal/translator/translator.go index ae013e5255431..ae7b449d5b8cc 100644 --- a/receiver/awsxrayreceiver/internal/translator/translator.go +++ b/receiver/awsxrayreceiver/internal/translator/translator.go @@ -7,6 +7,7 @@ import ( "encoding/hex" "encoding/json" "errors" + "strings" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" @@ -65,7 +66,10 @@ func ToTraces(rawSeg []byte, recorder telemetry.Recorder) (ptrace.Traces, int, e // TraceID of the root segment in because embedded subsegments // do not have that information, but it's needed after we flatten // the embedded subsegment to generate independent child spans. - _, err = segToSpans(seg, seg.TraceID, nil, spans) + // Sometimes, subsegments are sent separately in an async workflow, + // check segment type to determine the proper span kind. 
+ isSubsegment := seg.ParentID != nil && seg.Type != nil && strings.EqualFold(*seg.Type, "subsegment") + _, err = segToSpans(seg, seg.TraceID, nil, isSubsegment, spans) if err != nil { recorder.RecordSegmentsRejected(count) return ptrace.Traces{}, count, err @@ -74,10 +78,10 @@ func ToTraces(rawSeg []byte, recorder telemetry.Recorder) (ptrace.Traces, int, e return traceData, count, nil } -func segToSpans(seg *awsxray.Segment, traceID, parentID *string, spans ptrace.SpanSlice) (ptrace.Span, error) { +func segToSpans(seg *awsxray.Segment, traceID, parentID *string, isSubsegment bool, spans ptrace.SpanSlice) (ptrace.Span, error) { span := spans.AppendEmpty() - err := populateSpan(seg, traceID, parentID, span) + err := populateSpan(seg, traceID, parentID, isSubsegment, span) if err != nil { return ptrace.Span{}, err } @@ -86,7 +90,7 @@ func segToSpans(seg *awsxray.Segment, traceID, parentID *string, spans ptrace.Sp for i := range seg.Subsegments { s := &seg.Subsegments[i] populatedChildSpan, err = segToSpans(s, - traceID, seg.ID, + traceID, seg.ID, true, spans) if err != nil { return ptrace.Span{}, err @@ -110,7 +114,7 @@ func segToSpans(seg *awsxray.Segment, traceID, parentID *string, spans ptrace.Sp return span, nil } -func populateSpan(seg *awsxray.Segment, traceID, parentID *string, span ptrace.Span) error { +func populateSpan(seg *awsxray.Segment, traceID, parentID *string, isSubsegment bool, span ptrace.Span) error { attrs := span.Attributes() attrs.Clear() attrs.EnsureCapacity(initAttrCapacity) @@ -157,11 +161,11 @@ func populateSpan(seg *awsxray.Segment, traceID, parentID *string, span ptrace.S span.SetTraceID(traceIDBytes) span.SetSpanID(spanIDBytes) - + if !isSubsegment { + span.SetKind(ptrace.SpanKindServer) + } if parentIDBytes != [8]byte{} { span.SetParentSpanID(parentIDBytes) - } else { - span.SetKind(ptrace.SpanKindServer) } addStartTime(seg.StartTime, span) diff --git a/receiver/awsxrayreceiver/internal/translator/translator_test.go 
b/receiver/awsxrayreceiver/internal/translator/translator_test.go index 7eb588fdad0fb..c105b72501369 100644 --- a/receiver/awsxrayreceiver/internal/translator/translator_test.go +++ b/receiver/awsxrayreceiver/internal/translator/translator_test.go @@ -905,6 +905,49 @@ func TestTranslation(t *testing.T) { assert.NoError(t, ptracetest.CompareResourceSpans(expectedRs, actualRs)) }, }, + { + testCase: "TranslateSegmentWithParentId", + samplePath: filepath.Join("../../../../internal/aws/xray", "testdata", "segmentWithParentId.txt"), + expectedResourceAttrs: func(seg *awsxray.Segment) map[string]any { + return map[string]any{ + string(conventions.ServiceNameKey): *seg.Name, + string(conventions.CloudProviderKey): "unknown", + } + }, + expectedRecord: types.TelemetryRecord{ + SegmentsReceivedCount: aws.Int32(1), + SegmentsRejectedCount: aws.Int32(0), + }, + propsPerSpan: func(_ *testing.T, _ string, seg *awsxray.Segment) []perSpanProperties { + attrs := pcommon.NewMap() + res := perSpanProperties{ + traceID: *seg.TraceID, + spanID: *seg.ID, + parentSpanID: seg.ParentID, + name: *seg.Name, + startTimeSec: *seg.StartTime, + endTimeSec: seg.EndTime, + spanKind: ptrace.SpanKindServer, + spanStatus: spanSt{ + code: ptrace.StatusCodeUnset, + }, + attrs: attrs, + } + return []perSpanProperties{res} + // return nil + }, + verification: func(testCase string, + _ *awsxray.Segment, + expectedRs ptrace.ResourceSpans, actualTraces ptrace.Traces, err error, + ) { + assert.NoError(t, err, testCase+": translation should've succeeded") + assert.Equal(t, 1, actualTraces.ResourceSpans().Len(), + testCase+": one segment should translate to 1 ResourceSpans") + + actualRs := actualTraces.ResourceSpans().At(0) + assert.NoError(t, ptracetest.CompareResourceSpans(expectedRs, actualRs)) + }, + }, { testCase: "TranslateJsonUnmarshallFailed", expectedUnmarshallFailure: true, From f438958f0b87c1eaf69c1d6eff52bb29ebc41a75 Mon Sep 17 00:00:00 2001 From: Florian Lehner Date: Thu, 27 Nov 2025 09:33:51 
+0100 Subject: [PATCH 28/41] [chore] add changelog for #44397 (#44571) #### Description Add additional changelog for a breaking change introduced by #44397. #### Link to tracking issue Fixes https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/44560 #### Testing #### Documentation --------- Signed-off-by: Florian Lehner --- .chloggen/issue-44560.yaml | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 .chloggen/issue-44560.yaml diff --git a/.chloggen/issue-44560.yaml b/.chloggen/issue-44560.yaml new file mode 100644 index 0000000000000..2959e5bae26c8 --- /dev/null +++ b/.chloggen/issue-44560.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. receiver/filelog) +component: pkg/ottl + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Type of field profile.duration changes from time.Time to int64. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [44397] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [user] From 5cd6c1b106084169208f1c6a3e162471dc9ea575 Mon Sep 17 00:00:00 2001 From: Khushi Jain Date: Thu, 27 Nov 2025 14:04:22 +0530 Subject: [PATCH 29/41] [receiver/kafka] Remove deprecated legacy topic and encoding (#44568) #### Description This PR removes the deprecated topic and encoding config --- .chloggen/remove-deprecated-topic.yaml | 27 ++++++ receiver/kafkareceiver/config.go | 53 ----------- receiver/kafkareceiver/config_test.go | 98 --------------------- receiver/kafkareceiver/testdata/config.yaml | 20 ----- 4 files changed, 27 insertions(+), 171 deletions(-) create mode 100644 .chloggen/remove-deprecated-topic.yaml diff --git a/.chloggen/remove-deprecated-topic.yaml b/.chloggen/remove-deprecated-topic.yaml new file mode 100644 index 0000000000000..bec81b12b1072 --- /dev/null +++ b/.chloggen/remove-deprecated-topic.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: receiver/kafka + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Remove deprecated topic and encoding + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [44568] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. 
'[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/receiver/kafkareceiver/config.go b/receiver/kafkareceiver/config.go index c0eed5da3c135..eed203ef68452 100644 --- a/receiver/kafkareceiver/config.go +++ b/receiver/kafkareceiver/config.go @@ -34,24 +34,6 @@ type Config struct { // Profiles holds configuration about how profiles should be consumed. Profiles TopicEncodingConfig `mapstructure:"profiles"` - // Topic holds the name of the Kafka topic from which to consume data. - // - // Topic has no default. If explicitly specified, it will take precedence - // over the default values of Logs.Topic, Traces.Topic, and Metrics.Topic. - // - // Deprecated [v0.124.0]: Use Logs.Topic, Traces.Topic, and Metrics.Topic. - Topic string `mapstructure:"topic"` - - // Encoding holds the expected encoding of messages (default "otlp_proto") - // - // Encoding has no default. If explicitly specified, it will take precedence - // over the default values of Logs.Encoding, Traces.Encoding, and - // Metrics.Encoding. - // - // Deprecated [v0.124.0]: Use Logs.Encoding, Traces.Encoding, and - // Metrics.Encoding. - Encoding string `mapstructure:"encoding"` - // MessageMarking controls the way the messages are marked as consumed. MessageMarking MessageMarking `mapstructure:"message_marking"` @@ -70,41 +52,6 @@ func (c *Config) Unmarshal(conf *confmap.Conf) error { if err := conf.Unmarshal(c); err != nil { return err } - // Check if deprecated fields have been explicitly set, - // in which case they should be used instead of signal- - // specific defaults. 
- var zeroConfig Config - if err := conf.Unmarshal(&zeroConfig); err != nil { - return err - } - if c.Topic != "" { - if zeroConfig.Logs.Topic == "" { - c.Logs.Topic = c.Topic - } - if zeroConfig.Metrics.Topic == "" { - c.Metrics.Topic = c.Topic - } - if zeroConfig.Traces.Topic == "" { - c.Traces.Topic = c.Topic - } - if zeroConfig.Profiles.Topic == "" { - c.Profiles.Topic = c.Topic - } - } - if c.Encoding != "" { - if zeroConfig.Logs.Encoding == "" { - c.Logs.Encoding = c.Encoding - } - if zeroConfig.Metrics.Encoding == "" { - c.Metrics.Encoding = c.Encoding - } - if zeroConfig.Traces.Encoding == "" { - c.Traces.Encoding = c.Encoding - } - if zeroConfig.Profiles.Encoding == "" { - c.Profiles.Encoding = c.Encoding - } - } // Set OnPermanentError default value to inherit from OnError for backward compatibility // Only if OnPermanentError was not explicitly set in the config diff --git a/receiver/kafkareceiver/config_test.go b/receiver/kafkareceiver/config_test.go index 2d3079d4dc5e1..1493fea1eac2a 100644 --- a/receiver/kafkareceiver/config_test.go +++ b/receiver/kafkareceiver/config_test.go @@ -31,104 +31,6 @@ func TestLoadConfig(t *testing.T) { expected component.Config expectedErr error }{ - { - id: component.NewIDWithName(metadata.Type, ""), - expected: &Config{ - ClientConfig: func() configkafka.ClientConfig { - config := configkafka.NewDefaultClientConfig() - config.Brokers = []string{"foo:123", "bar:456"} - config.ResolveCanonicalBootstrapServersOnly = true - config.ClientID = "the_client_id" - return config - }(), - ConsumerConfig: func() configkafka.ConsumerConfig { - config := configkafka.NewDefaultConsumerConfig() - config.GroupID = "the_group_id" - return config - }(), - Logs: TopicEncodingConfig{ - Topic: "spans", - Encoding: "otlp_proto", - }, - Metrics: TopicEncodingConfig{ - Topic: "spans", - Encoding: "otlp_proto", - }, - Traces: TopicEncodingConfig{ - Topic: "spans", - Encoding: "otlp_proto", - }, - Profiles: TopicEncodingConfig{ - Topic: "spans", - 
Encoding: "otlp_proto", - }, - Topic: "spans", - ErrorBackOff: configretry.BackOffConfig{ - Enabled: false, - }, - Telemetry: TelemetryConfig{ - Metrics: MetricsConfig{ - KafkaReceiverRecordsDelay: MetricConfig{ - Enabled: true, - }, - }, - }, - }, - }, - { - id: component.NewIDWithName(metadata.Type, "legacy_topic"), - expected: &Config{ - ClientConfig: configkafka.NewDefaultClientConfig(), - ConsumerConfig: configkafka.NewDefaultConsumerConfig(), - Logs: TopicEncodingConfig{ - Topic: "legacy_topic", - Encoding: "otlp_proto", - }, - Metrics: TopicEncodingConfig{ - Topic: "metrics_topic", - Encoding: "otlp_proto", - }, - Traces: TopicEncodingConfig{ - Topic: "legacy_topic", - Encoding: "otlp_proto", - }, - Profiles: TopicEncodingConfig{ - Topic: "legacy_topic", - Encoding: "otlp_proto", - }, - Topic: "legacy_topic", - ErrorBackOff: configretry.BackOffConfig{ - Enabled: false, - }, - }, - }, - { - id: component.NewIDWithName(metadata.Type, "legacy_encoding"), - expected: &Config{ - ClientConfig: configkafka.NewDefaultClientConfig(), - ConsumerConfig: configkafka.NewDefaultConsumerConfig(), - Logs: TopicEncodingConfig{ - Topic: "otlp_logs", - Encoding: "legacy_encoding", - }, - Metrics: TopicEncodingConfig{ - Topic: "otlp_metrics", - Encoding: "metrics_encoding", - }, - Traces: TopicEncodingConfig{ - Topic: "otlp_spans", - Encoding: "legacy_encoding", - }, - Profiles: TopicEncodingConfig{ - Topic: "otlp_profiles", - Encoding: "legacy_encoding", - }, - Encoding: "legacy_encoding", - ErrorBackOff: configretry.BackOffConfig{ - Enabled: false, - }, - }, - }, { id: component.NewIDWithName(metadata.Type, "logs"), expected: &Config{ diff --git a/receiver/kafkareceiver/testdata/config.yaml b/receiver/kafkareceiver/testdata/config.yaml index 96dc1ec54710c..765b6bcbb4ba2 100644 --- a/receiver/kafkareceiver/testdata/config.yaml +++ b/receiver/kafkareceiver/testdata/config.yaml @@ -1,23 +1,3 @@ -kafka: - topic: spans - brokers: - - "foo:123" - - "bar:456" - 
resolve_canonical_bootstrap_servers_only: true - client_id: the_client_id - group_id: the_group_id - telemetry: - metrics: - kafka_receiver_records_delay: - enabled: true -kafka/legacy_topic: - topic: legacy_topic - metrics: - topic: metrics_topic -kafka/legacy_encoding: - encoding: legacy_encoding - metrics: - encoding: metrics_encoding kafka/logs: logs: topic: logs From 8c99a07ea31b4cf7f2f909840c3ad9527be42fd0 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 27 Nov 2025 00:41:54 -0800 Subject: [PATCH 30/41] fix(deps): update kubernetes packages to v0.34.2 (#44506) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Change | Age | Confidence | |---|---|---|---| | [k8s.io/apimachinery](https://redirect.github.com/kubernetes/apimachinery) | `v0.34.1` -> `v0.34.2` | [![age](https://developer.mend.io/api/mc/badges/age/go/k8s.io%2fapimachinery/v0.34.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/k8s.io%2fapimachinery/v0.34.1/v0.34.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | | [k8s.io/client-go](https://redirect.github.com/kubernetes/client-go) | `v0.34.1` -> `v0.34.2` | [![age](https://developer.mend.io/api/mc/badges/age/go/k8s.io%2fclient-go/v0.34.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/k8s.io%2fclient-go/v0.34.1/v0.34.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | --- > [!WARNING] > Some dependencies could not be looked up. Check the Dependency Dashboard for more information. --- ### Release Notes
kubernetes/apimachinery (k8s.io/apimachinery) ### [`v0.34.2`](https://redirect.github.com/kubernetes/apimachinery/compare/v0.34.1...v0.34.2) [Compare Source](https://redirect.github.com/kubernetes/apimachinery/compare/v0.34.1...v0.34.2)
kubernetes/client-go (k8s.io/client-go) ### [`v0.34.2`](https://redirect.github.com/kubernetes/client-go/compare/v0.34.1...v0.34.2) [Compare Source](https://redirect.github.com/kubernetes/client-go/compare/v0.34.1...v0.34.2)
--- ### Configuration πŸ“… **Schedule**: Branch creation - "on tuesday" (UTC), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. β™» **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. πŸ”• **Ignore**: Close this PR and you won't be reminded about these updates again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/open-telemetry/opentelemetry-collector-contrib). --------- Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: otelbot <197425009+otelbot@users.noreply.github.com> --- internal/k8sinventory/go.mod | 4 ++-- internal/k8sinventory/go.sum | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/internal/k8sinventory/go.mod b/internal/k8sinventory/go.mod index d0294aa1f25cb..62f87bcd85610 100644 --- a/internal/k8sinventory/go.mod +++ b/internal/k8sinventory/go.mod @@ -5,8 +5,8 @@ go 1.24.4 require ( github.com/stretchr/testify v1.11.1 go.uber.org/zap v1.27.1 - k8s.io/apimachinery v0.34.1 - k8s.io/client-go v0.34.1 + k8s.io/apimachinery v0.34.2 + k8s.io/client-go v0.34.2 ) require ( diff --git a/internal/k8sinventory/go.sum b/internal/k8sinventory/go.sum index 7a6aed6e93aa1..5f7c77cacf033 100644 --- a/internal/k8sinventory/go.sum +++ b/internal/k8sinventory/go.sum @@ -142,12 +142,12 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM= -k8s.io/api v0.34.1/go.mod 
h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk= -k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4= -k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= -k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY= -k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8= +k8s.io/api v0.34.2 h1:fsSUNZhV+bnL6Aqrp6O7lMTy6o5x2C4XLjnh//8SLYY= +k8s.io/api v0.34.2/go.mod h1:MMBPaWlED2a8w4RSeanD76f7opUoypY8TFYkSM+3XHw= +k8s.io/apimachinery v0.34.2 h1:zQ12Uk3eMHPxrsbUJgNF8bTauTVR2WgqJsTmwTE/NW4= +k8s.io/apimachinery v0.34.2/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/client-go v0.34.2 h1:Co6XiknN+uUZqiddlfAjT68184/37PS4QAzYvQvDR8M= +k8s.io/client-go v0.34.2/go.mod h1:2VYDl1XXJsdcAxw7BenFslRQX28Dxz91U9MWKjX97fE= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= From 5b3e9b34d1a3fde3c0527e962284b56dae1c5b89 Mon Sep 17 00:00:00 2001 From: Chris Marchbanks Date: Thu, 27 Nov 2025 09:45:05 +0100 Subject: [PATCH 31/41] [processor/tailsampling] Remove internally used fields from samplingpolicy.TraceData (#44435) #### Description These fields are only used by the internals of the tail sampling processor to track metrics or the decision that was made. They are not needed for any samplers, and therefore should not be used by any sampler extensions either. 
--- .../remove-extra-fields-in-trace-data.yaml | 27 +++++++++++++ .../pkg/samplingpolicy/samplingpolicy.go | 7 ---- processor/tailsamplingprocessor/processor.go | 39 ++++++++++++------- .../processor_benchmarks_test.go | 1 - 4 files changed, 53 insertions(+), 21 deletions(-) create mode 100644 .chloggen/remove-extra-fields-in-trace-data.yaml diff --git a/.chloggen/remove-extra-fields-in-trace-data.yaml b/.chloggen/remove-extra-fields-in-trace-data.yaml new file mode 100644 index 0000000000000..fb0280cf6c1cd --- /dev/null +++ b/.chloggen/remove-extra-fields-in-trace-data.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. receiver/filelog) +component: processor/tail_sampling + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Remove only internally relevant fields from samplingpolicy.TraceData. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [44435] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [api] diff --git a/processor/tailsamplingprocessor/pkg/samplingpolicy/samplingpolicy.go b/processor/tailsamplingprocessor/pkg/samplingpolicy/samplingpolicy.go index eb363a0f18b70..e1bf7d56de700 100644 --- a/processor/tailsamplingprocessor/pkg/samplingpolicy/samplingpolicy.go +++ b/processor/tailsamplingprocessor/pkg/samplingpolicy/samplingpolicy.go @@ -5,7 +5,6 @@ package samplingpolicy // import "github.com/open-telemetry/opentelemetry-collec import ( "context" - "time" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" @@ -13,16 +12,10 @@ import ( // TraceData stores the sampling related trace data. type TraceData struct { - // Arrival time the first span for the trace was received. - ArrivalTime time.Time - // DecisionTime time when sampling decision was taken. - DecisionTime time.Time // SpanCount track the number of spans on the trace. SpanCount int64 // ReceivedBatches stores all the batches received for the trace. ReceivedBatches ptrace.Traces - // FinalDecision. - FinalDecision Decision } // Decision gives the status of sampling decision. diff --git a/processor/tailsamplingprocessor/processor.go b/processor/tailsamplingprocessor/processor.go index 641277d494285..b526cc2b3d160 100644 --- a/processor/tailsamplingprocessor/processor.go +++ b/processor/tailsamplingprocessor/processor.go @@ -41,6 +41,17 @@ type policy struct { attribute metric.MeasurementOption } +// traceData is a wrapper around the publicly used samplingpolicy.TraceData +// that tracks information related to the decision making process but not +// needed by any sampler implementations. 
+type traceData struct { + samplingpolicy.TraceData + + arrivalTime time.Time + decisionTime time.Time + finalDecision samplingpolicy.Decision +} + type tailSamplingSpanProcessor struct { ctx context.Context @@ -51,7 +62,7 @@ type tailSamplingSpanProcessor struct { deleteTraceQueue *list.List nextConsumer consumer.Traces policies []*policy - idToTrace map[pcommon.TraceID]*samplingpolicy.TraceData + idToTrace map[pcommon.TraceID]*traceData tickerFrequency time.Duration decisionBatcher idbatcher.Batcher sampledIDCache cache.Cache[bool] @@ -103,7 +114,7 @@ func newTracesProcessor(ctx context.Context, set processor.Settings, nextConsume sampledIDCache: sampledDecisions, nonSampledIDCache: nonSampledDecisions, logger: set.Logger, - idToTrace: make(map[pcommon.TraceID]*samplingpolicy.TraceData), + idToTrace: make(map[pcommon.TraceID]*traceData), deleteTraceQueue: list.New(), sampleOnFirstMatch: cfg.SampleOnFirstMatch, blockOnOverflow: cfg.BlockOnOverflow, @@ -536,14 +547,14 @@ func (tsp *tailSamplingSpanProcessor) samplingPolicyOnTick() bool { metrics.idNotFoundOnMapCount++ continue } - trace.DecisionTime = time.Now() + trace.decisionTime = time.Now() - decision := tsp.makeDecision(id, trace, metrics) + decision := tsp.makeDecision(id, &trace.TraceData, metrics) globalTracesSampledByDecision[decision]++ // Sampled or not, remove the batches allSpans := trace.ReceivedBatches - trace.FinalDecision = decision + trace.finalDecision = decision trace.ReceivedBatches = ptrace.NewTraces() if decision == samplingpolicy.Sampled { @@ -691,10 +702,12 @@ func (tsp *tailSamplingSpanProcessor) processTrace(id pcommon.TraceID, rss ptrac actualData, ok := tsp.idToTrace[id] if !ok { - actualData = &samplingpolicy.TraceData{ - ArrivalTime: currTime, - SpanCount: spanCount, - ReceivedBatches: ptrace.NewTraces(), + actualData = &traceData{ + arrivalTime: currTime, + TraceData: samplingpolicy.TraceData{ + SpanCount: spanCount, + ReceivedBatches: ptrace.NewTraces(), + }, } tsp.idToTrace[id] = 
actualData @@ -709,7 +722,7 @@ func (tsp *tailSamplingSpanProcessor) processTrace(id pcommon.TraceID, rss ptrac actualData.SpanCount += spanCount } - finalDecision := actualData.FinalDecision + finalDecision := actualData.finalDecision if finalDecision == samplingpolicy.Unspecified { // If the final decision hasn't been made, add the new spans to the @@ -729,8 +742,8 @@ func (tsp *tailSamplingSpanProcessor) processTrace(id pcommon.TraceID, rss ptrac tsp.logger.Warn("Unexpected sampling decision", zap.Int("decision", int(finalDecision))) } - if !actualData.DecisionTime.IsZero() { - tsp.telemetry.ProcessorTailSamplingSamplingLateSpanAge.Record(tsp.ctx, int64(time.Since(actualData.DecisionTime)/time.Second)) + if !actualData.decisionTime.IsZero() { + tsp.telemetry.ProcessorTailSamplingSamplingLateSpanAge.Record(tsp.ctx, int64(time.Since(actualData.decisionTime)/time.Second)) } } @@ -767,7 +780,7 @@ func (tsp *tailSamplingSpanProcessor) dropTrace(traceID pcommon.TraceID, deletio } delete(tsp.idToTrace, traceID) - tsp.telemetry.ProcessorTailSamplingSamplingTraceRemovalAge.Record(tsp.ctx, int64(deletionTime.Sub(trace.ArrivalTime)/time.Second)) + tsp.telemetry.ProcessorTailSamplingSamplingTraceRemovalAge.Record(tsp.ctx, int64(deletionTime.Sub(trace.arrivalTime)/time.Second)) } // forwardSpans sends the trace data to the next consumer. 
it is different from diff --git a/processor/tailsamplingprocessor/processor_benchmarks_test.go b/processor/tailsamplingprocessor/processor_benchmarks_test.go index 5b90b07916704..4a8202e570a42 100644 --- a/processor/tailsamplingprocessor/processor_benchmarks_test.go +++ b/processor/tailsamplingprocessor/processor_benchmarks_test.go @@ -39,7 +39,6 @@ func BenchmarkSampling(b *testing.B) { for _, batch := range batches { sampleBatches = append(sampleBatches, &samplingpolicy.TraceData{ - ArrivalTime: time.Now(), SpanCount: 0, ReceivedBatches: batch, }) From c27b4bd7ffeb644e2ac140f59c6ee05e85e64203 Mon Sep 17 00:00:00 2001 From: Christos Markou Date: Fri, 28 Nov 2025 00:20:18 +0200 Subject: [PATCH 32/41] Update k8slogreceiver code-owners status and mark as unmaintained (#44578) #### Description Based on the discussion from https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/44078, this PR updates the component's code-owners' status and marks it as unmaintained until there is further interest for its development. ref: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#unmaintained > After 3 months of being unmaintained, these components will be removed from official distribution. 
#### Link to tracking issue Fixes https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/44078 #### Testing ~ #### Documentation Tuned Signed-off-by: ChrsMark --- .chloggen/update_k8slogsreceiver_status.yaml | 27 +++++++++++++++++++ .github/ALLOWLIST | 1 + .github/CODEOWNERS | 2 +- .github/component_labels.txt | 2 +- receiver/k8slogreceiver/README.md | 7 ++--- .../internal/metadata/generated_status.go | 2 +- receiver/k8slogreceiver/metadata.yaml | 5 ++-- 7 files changed, 38 insertions(+), 8 deletions(-) create mode 100644 .chloggen/update_k8slogsreceiver_status.yaml diff --git a/.chloggen/update_k8slogsreceiver_status.yaml b/.chloggen/update_k8slogsreceiver_status.yaml new file mode 100644 index 0000000000000..03cafe9228347 --- /dev/null +++ b/.chloggen/update_k8slogsreceiver_status.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. receiver/filelog) +component: receiver/k8slog + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Update k8slogreceiver code-owners status and mark as unmaintained + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [44078] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. 
'[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/.github/ALLOWLIST b/.github/ALLOWLIST index 65cd0761183d7..a835371aeb080 100644 --- a/.github/ALLOWLIST +++ b/.github/ALLOWLIST @@ -31,5 +31,6 @@ internal/common receiver/bigipreceiver receiver/carbonreceiver +receiver/k8slogreceiver # End unmaintained components list diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index ad0c0f8cf9e29..6eacd64e8a743 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -292,7 +292,6 @@ receiver/jmxreceiver/ @open-telemetry receiver/journaldreceiver/ @open-telemetry/collector-contrib-approvers @belimawr @namco1992 receiver/k8sclusterreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax @TylerHelmuth @povilasv @ChrsMark receiver/k8seventsreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax @TylerHelmuth @ChrsMark -receiver/k8slogreceiver/ @open-telemetry/collector-contrib-approvers @h0cheung @TylerHelmuth receiver/k8sobjectsreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax @hvaghani221 @TylerHelmuth @ChrsMark @krisztianfekete receiver/kafkametricsreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax receiver/kafkareceiver/ @open-telemetry/collector-contrib-approvers @pavolloffay @MovieStoreGuy @axw @paulojmdias @@ -386,5 +385,6 @@ reports/distributions/otlp.yaml @open-telemetry/collector-contrib-approvers receiver/bigipreceiver/ @open-telemetry/collector-contrib-approvers receiver/carbonreceiver/ @open-telemetry/collector-contrib-approvers +receiver/k8slogreceiver/ @open-telemetry/collector-contrib-approvers # End unmaintained components list diff --git a/.github/component_labels.txt b/.github/component_labels.txt index fd7052e8ab29a..70d2156ef46fa 100644 --- a/.github/component_labels.txt +++ b/.github/component_labels.txt @@ -273,7 +273,6 @@ receiver/jmxreceiver receiver/jmx 
receiver/journaldreceiver receiver/journald receiver/k8sclusterreceiver receiver/k8scluster receiver/k8seventsreceiver receiver/k8sevents -receiver/k8slogreceiver receiver/k8slog receiver/k8sobjectsreceiver receiver/k8sobjects receiver/kafkametricsreceiver receiver/kafkametrics receiver/kafkareceiver receiver/kafka @@ -345,3 +344,4 @@ reports/distributions/k8s.yaml reports/distributions/k8s.yaml reports/distributions/otlp.yaml reports/distributions/otlp.yaml receiver/bigipreceiver receiver/bigip receiver/carbonreceiver receiver/carbon +receiver/k8slogreceiver receiver/k8slog diff --git a/receiver/k8slogreceiver/README.md b/receiver/k8slogreceiver/README.md index 49b3db91159cf..ff4396442d373 100644 --- a/receiver/k8slogreceiver/README.md +++ b/receiver/k8slogreceiver/README.md @@ -3,13 +3,14 @@ | Status | | | ------------- |-----------| -| Stability | [development]: logs | +| Stability | [unmaintained]: logs | | Distributions | [] | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fk8slog%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fk8slog) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fk8slog%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fk8slog) | | Code coverage | [![codecov](https://codecov.io/github/open-telemetry/opentelemetry-collector-contrib/graph/main/badge.svg?component=receiver_k8slog)](https://app.codecov.io/gh/open-telemetry/opentelemetry-collector-contrib/tree/main/?components%5B0%5D=receiver_k8slog&displayType=list) | -| [Code 
Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@h0cheung](https://www.github.com/h0cheung), [@TylerHelmuth](https://www.github.com/TylerHelmuth) | +| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | \| Seeking more code owners! | +| Emeritus | [@h0cheung](https://www.github.com/h0cheung), [@TylerHelmuth](https://www.github.com/TylerHelmuth) | -[development]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#development +[unmaintained]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#unmaintained Tails and parses logs in k8s environment. diff --git a/receiver/k8slogreceiver/internal/metadata/generated_status.go b/receiver/k8slogreceiver/internal/metadata/generated_status.go index 51c2eef5a31c6..2d4da7ac5f61a 100644 --- a/receiver/k8slogreceiver/internal/metadata/generated_status.go +++ b/receiver/k8slogreceiver/internal/metadata/generated_status.go @@ -12,5 +12,5 @@ var ( ) const ( - LogsStability = component.StabilityLevelDevelopment + LogsStability = component.StabilityLevelUnmaintained ) diff --git a/receiver/k8slogreceiver/metadata.yaml b/receiver/k8slogreceiver/metadata.yaml index 3230d065d7deb..3bb1110beab29 100644 --- a/receiver/k8slogreceiver/metadata.yaml +++ b/receiver/k8slogreceiver/metadata.yaml @@ -3,6 +3,7 @@ type: k8slog status: class: receiver stability: - development: [logs] + unmaintained: [logs] codeowners: - active: [h0cheung, TylerHelmuth] + emeritus: [h0cheung, TylerHelmuth] + seeking_new: true From c11af196d3fb24a0af0f25266476a028b72cc074 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9lian=20GARCIA?= Date: Thu, 27 Nov 2025 23:46:40 +0100 Subject: [PATCH 33/41] [receiver/azuremonitorreceiver] Collect only supported aggregations for each metric (501 not implemented issue) (#43880) MIME-Version: 
1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit #### Description When we collect metrics, we ask for all available aggregations (Average, Sum, Min, Max, ...) but when we do so with some metrics, Azure API is giving a 501 Not Implemented error. So we need to really respect the definition of each metric. #### Link to tracking issue Relates #43648 (not fully fixing) #### Testing For the full e2e tests, the impact on tests has only been to setup supported aggregation in the ``metrics definition`` mocks. We simulate supporting all aggregation in the definition to make sure the tests results are unchanged. We rely on unit test of the ``getMetricAggregations`` function to test a more variety of cases. #### Documentation Updated to provide more details. image Signed-off-by: CΓ©lian Garcia Signed-off-by: Celian GARCIA --- .chloggen/fix_43648.yaml | 30 +++++++++++ receiver/azuremonitorreceiver/README.md | 17 ++++++- receiver/azuremonitorreceiver/mocks_test.go | 20 ++++++-- receiver/azuremonitorreceiver/scraper.go | 35 +++++++++---- .../azuremonitorreceiver/scraper_batch.go | 2 +- receiver/azuremonitorreceiver/scraper_test.go | 50 +++++++++++++------ 6 files changed, 123 insertions(+), 31 deletions(-) create mode 100644 .chloggen/fix_43648.yaml diff --git a/.chloggen/fix_43648.yaml b/.chloggen/fix_43648.yaml new file mode 100644 index 0000000000000..f98b82e1a0493 --- /dev/null +++ b/.chloggen/fix_43648.yaml @@ -0,0 +1,30 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. receiver/filelog) +component: receiver/azuremonitor + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). 
+note: Collect only supported aggregations for each metric (501 not implemented issue) + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [43648] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: | + Some metrics were not collected because we requested all available aggregation types. This led to 501 errors, as the Azure API returned responses indicating that certain aggregations were not implemented. + We now use the supported aggregations field from each metric definition to filter and request only the aggregations that are actually supported. + The user can expect fewer 501 errors in the logs and more metrics in the results. + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/receiver/azuremonitorreceiver/README.md b/receiver/azuremonitorreceiver/README.md index 46eee01b95469..7b7685a54334b 100644 --- a/receiver/azuremonitorreceiver/README.md +++ b/receiver/azuremonitorreceiver/README.md @@ -60,7 +60,21 @@ Authenticating using managed identities has the following optional settings: ### Filtering metrics -The `metrics` configuration setting is designed to limit scraping to specific metrics and their particular aggregations. 
It accepts a nested map where the key of the top-level is the Azure Metric Namespace, the key of the nested map is an Azure Metric Name, and the map values are a list of aggregation methods (e.g., Average, Minimum, Maximum, Total, Count). Additionally, the metric map value can be an empty array or an array with one element `*` (asterisk). In this case, the scraper will fetch all supported aggregations for a metric. The letter case of the Namespaces, Metric names, and Aggregations does not affect the functionality. +The `metrics` configuration setting is designed to **limit** scraping to specific metrics and their particular aggregations. +It accepts a nested map where +- the key of the top-level is the Azure Metric Namespace, +- the key of the nested map is an Azure Metric Name, +- and the map values are a list of aggregation methods (e.g., Average, Minimum, Maximum, Total, Count). + +> [!NOTE] +> - **"All aggregations" shortcut**: The metric map value can be an empty array ``[]`` or an array with one "wildcard" element `[*]`. + In this case, the scraper will fetch **all supported aggregations** for that metric, which is also the case if no + `metrics` configuration is provided. +> - **Case Insensitive**: The letter case of the Namespaces, Metric names, and Aggregations does not affect the functionality. + +> [!WARNING] +> If you started providing a `metrics` configuration for a namespace, you have to specify all the metrics and their +> aggregations for that namespace. Otherwise, these metrics will be ignored. 
Scraping limited metrics and aggregations: @@ -76,6 +90,7 @@ receivers: "microsoft.eventhub/namespaces": # scraper will fetch only the metrics listed below: IncomingMessages: [total] # metric IncomingMessages with aggregation "Total" NamespaceCpuUsage: [*] # metric NamespaceCpuUsage with all known aggregations + ActiveConnections: [] # metric ActiveConnections with all known aggregations (same effect as [*]) ``` ### Use Batch API (experimental) diff --git a/receiver/azuremonitorreceiver/mocks_test.go b/receiver/azuremonitorreceiver/mocks_test.go index b095c72ff06cc..695efdc5aa902 100644 --- a/receiver/azuremonitorreceiver/mocks_test.go +++ b/receiver/azuremonitorreceiver/mocks_test.go @@ -299,12 +299,13 @@ func newResourcesMockData(inputMap map[string][][]*armresources.GenericResourceE } // metricsDefinitionMockInput is used to mock the response of the metrics definition API. -// Everything is required except dimensions. +// Everything is required except dimensions and supportedAggregationTypes. type metricsDefinitionMockInput struct { - namespace string - name string - timeGrain string - dimensions []string + namespace string // required + name string // required + timeGrain string // required + dimensions []string // optional + supportedAggregationTypes []armmonitor.AggregationType // optional } // newMetricsDefinitionMockData is a helper function to create metrics definition list response. 
@@ -325,6 +326,15 @@ func newMetricsDefinitionMockData(inputMap map[string][]metricsDefinitionMockInp for _, dimension := range input.dimensions { toAppend.Dimensions = append(toAppend.Dimensions, &armmonitor.LocalizableString{Value: &dimension}) } + + if len(input.supportedAggregationTypes) == 0 { + input.supportedAggregationTypes = armmonitor.PossibleAggregationTypeValues() + } + + for _, aggregationType := range input.supportedAggregationTypes { + toAppend.SupportedAggregationTypes = append(toAppend.SupportedAggregationTypes, &aggregationType) + } + values[i] = &toAppend } diff --git a/receiver/azuremonitorreceiver/scraper.go b/receiver/azuremonitorreceiver/scraper.go index 6dec4e6521f86..c1750c378bed1 100644 --- a/receiver/azuremonitorreceiver/scraper.go +++ b/receiver/azuremonitorreceiver/scraper.go @@ -366,7 +366,7 @@ func (s *azureScraper) getResourceMetricsDefinitions(ctx context.Context, subscr for _, v := range nextResult.Value { metricName := *v.Name.Value - metricAggregations := getMetricAggregations(*v.Namespace, metricName, s.cfg.Metrics) + metricAggregations := getMetricAggregations(*v.Namespace, metricName, s.cfg.Metrics, convertAggregationsToStr(v.SupportedAggregationTypes)) if len(metricAggregations) == 0 { continue } @@ -515,30 +515,39 @@ func (s *azureScraper) processTimeseriesData( } } -func getMetricAggregations(metricNamespace, metricName string, filters NestedListAlias) []string { +// getMetricAggregations returns a list of aggregations for a given namespace/metric. +// Two parameters are considered to know the aggregation to choose +// - a filter (given in configuration) +// - a list of supported aggregations (given by the API) +// If one namespace/metrics combination matches a provided filter, +// > Then it returns the aggregations in the filter +// > Otherwise it returns all supported aggregations. +// Note that a special filter * is supported to return all supported aggregations explicitly. 
+// /!\ It does not control the aggregations in the filters. If it's not in the supported list, it still lets it pass. +func getMetricAggregations(metricNamespace, metricName string, filters NestedListAlias, supportedAggregations []string) []string { // default behavior when no metric filters specified: pass all metrics with all aggregations if len(filters) == 0 { - return aggregations + return supportedAggregations } metricsFilters, ok := mapFindInsensitive(filters, metricNamespace) - // metric namespace not found or it's empty: pass all metrics from the namespace + // metric namespace isn't found, or it's empty: pass all metrics from the namespace if !ok || len(metricsFilters) == 0 { - return aggregations + return supportedAggregations } aggregationsFilters, ok := mapFindInsensitive(metricsFilters, metricName) - // if target metric is absent in metrics map: filter out metric + // if the target metric is absent in the metrics map: filter out metric if !ok { return []string{} } // allow all aggregations if others are not specified if len(aggregationsFilters) == 0 || slices.Contains(aggregationsFilters, filterAllAggregations) { - return aggregations + return supportedAggregations } - // collect known supported aggregations - out := []string{} + // collect known aggregations without filtering on supported + var out []string for _, filter := range aggregationsFilters { for _, aggregation := range aggregations { if strings.EqualFold(aggregation, filter) { @@ -550,6 +559,14 @@ func getMetricAggregations(metricNamespace, metricName string, filters NestedLis return out } +func convertAggregationsToStr(aggregations []*armmonitor.AggregationType) []string { + var result []string + for _, aggr := range aggregations { + result = append(result, string(*aggr)) + } + return result +} + func mapFindInsensitive[T any](m map[string]T, key string) (T, bool) { for k, v := range m { if strings.EqualFold(key, k) { diff --git a/receiver/azuremonitorreceiver/scraper_batch.go 
b/receiver/azuremonitorreceiver/scraper_batch.go index 636e80547cb7d..2a02880a96a61 100644 --- a/receiver/azuremonitorreceiver/scraper_batch.go +++ b/receiver/azuremonitorreceiver/scraper_batch.go @@ -351,7 +351,7 @@ func (s *azureBatchScraper) getResourceMetricsDefinitionsByType(ctx context.Cont for _, v := range nextResult.Value { metricName := *v.Name.Value - metricAggregations := getMetricAggregations(*v.Namespace, metricName, s.cfg.Metrics) + metricAggregations := getMetricAggregations(*v.Namespace, metricName, s.cfg.Metrics, convertAggregationsToStr(v.SupportedAggregationTypes)) if len(metricAggregations) == 0 { continue } diff --git a/receiver/azuremonitorreceiver/scraper_test.go b/receiver/azuremonitorreceiver/scraper_test.go index 3a23146236afe..c2c9332292059 100644 --- a/receiver/azuremonitorreceiver/scraper_test.go +++ b/receiver/azuremonitorreceiver/scraper_test.go @@ -583,21 +583,24 @@ func TestGetMetricAggregations(t *testing.T) { testNamespaceName := "Microsoft.AAD/DomainServices" testMetricName := "MetricName" tests := []struct { - name string - filters NestedListAlias - want []string + name string + filters NestedListAlias + supportedAggregations []string + want []string }{ { - name: "should return all aggregations when metrics filter empty", - filters: NestedListAlias{}, - want: aggregations, + name: "should return supported aggregations when metrics filter empty", + filters: NestedListAlias{}, + supportedAggregations: []string{aggregations[0]}, + want: []string{aggregations[0]}, }, { name: "should return all aggregations when namespace not in filters", filters: NestedListAlias{ "another.namespace": nil, }, - want: aggregations, + supportedAggregations: []string{aggregations[0]}, + want: []string{aggregations[0]}, }, { name: "should return all aggregations when metric in filters", @@ -606,7 +609,8 @@ func TestGetMetricAggregations(t *testing.T) { testMetricName: {}, }, }, - want: aggregations, + supportedAggregations: []string{aggregations[0]}, + 
want: []string{aggregations[0]}, }, { name: "should return all aggregations ignoring metric name case", @@ -615,7 +619,8 @@ func TestGetMetricAggregations(t *testing.T) { strings.ToLower(testMetricName): {}, }, }, - want: aggregations, + supportedAggregations: []string{aggregations[0]}, + want: []string{aggregations[0]}, }, { name: "should return all aggregations when asterisk in filters", @@ -624,7 +629,8 @@ func TestGetMetricAggregations(t *testing.T) { testMetricName: {filterAllAggregations}, }, }, - want: aggregations, + supportedAggregations: []string{aggregations[0]}, + want: []string{aggregations[0]}, }, { name: "should be empty when metric not in filters", @@ -633,7 +639,8 @@ func TestGetMetricAggregations(t *testing.T) { "not_this_metric": {}, }, }, - want: []string{}, + supportedAggregations: []string{aggregations[0]}, + want: []string{}, }, { name: "should return one aggregations", @@ -642,7 +649,8 @@ func TestGetMetricAggregations(t *testing.T) { testMetricName: {aggregations[0]}, }, }, - want: []string{aggregations[0]}, + supportedAggregations: []string{aggregations[0]}, + want: []string{aggregations[0]}, }, { name: "should return one aggregations ignoring aggregation case", @@ -651,7 +659,18 @@ func TestGetMetricAggregations(t *testing.T) { testMetricName: {strings.ToLower(aggregations[0])}, }, }, - want: []string{aggregations[0]}, + supportedAggregations: []string{aggregations[0]}, + want: []string{aggregations[0]}, + }, + { + name: "should return one aggregations even if not supported", + filters: NestedListAlias{ + testNamespaceName: { + testMetricName: {aggregations[0]}, + }, + }, + supportedAggregations: []string{aggregations[2]}, + want: []string{aggregations[0]}, }, { name: "should return many aggregations", @@ -660,13 +679,14 @@ func TestGetMetricAggregations(t *testing.T) { testMetricName: {aggregations[0], aggregations[2]}, }, }, - want: []string{aggregations[0], aggregations[2]}, + supportedAggregations: []string{aggregations[0]}, + want: 
[]string{aggregations[0], aggregations[2]}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := getMetricAggregations(testNamespaceName, testMetricName, tt.filters) + got := getMetricAggregations(testNamespaceName, testMetricName, tt.filters, tt.supportedAggregations) require.Equal(t, tt.want, got) }) } From e3474d7103f8ce90b5c0edb6cda19330bc14a95c Mon Sep 17 00:00:00 2001 From: Zymantas Maumevicius Date: Fri, 28 Nov 2025 08:34:39 +0200 Subject: [PATCH 34/41] [reveicer/prometheusreceiver] Add feature gate for extra scrape metrics in Prometheus receiver (#44539) #### Description Add feature gate for extra scape metrics in prometheus receiver along side current configuration. Change should not change any current behavior, just adds new additional input stream, which needs to be migrated later #### Link to tracking issue Fixes #44181 #### Testing Updated tests to support mainly new input - feature gate, due to the fact of deprecation of configuration setup --------- Co-authored-by: Arthur Silva Sens --- ...ceiver-featuregate-extrascrapemetrics.yaml | 27 +++++++++++++++++++ receiver/prometheusreceiver/README.md | 11 ++++++-- receiver/prometheusreceiver/config.go | 2 ++ receiver/prometheusreceiver/factory.go | 7 +++++ .../prometheusreceiver/metrics_receiver.go | 2 +- ...ceiver_report_extra_scrape_metrics_test.go | 10 ++++--- 6 files changed, 52 insertions(+), 7 deletions(-) create mode 100644 .chloggen/feat_prometheusreceiver-featuregate-extrascrapemetrics.yaml diff --git a/.chloggen/feat_prometheusreceiver-featuregate-extrascrapemetrics.yaml b/.chloggen/feat_prometheusreceiver-featuregate-extrascrapemetrics.yaml new file mode 100644 index 0000000000000..3bb91b66e3b2a --- /dev/null +++ b/.chloggen/feat_prometheusreceiver-featuregate-extrascrapemetrics.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. 
+ +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: deprecation + +# The name of the component, or a single word describing the area of concern, (e.g. receiver/filelog) +component: receiver/prometheus + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add feature gate for extra scrape metrics in Prometheus receiver + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [ 44181 ] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: The `report_extra_scrape_metrics` configuration option is deprecated and will eventually be removed; use the `receiver.prometheusreceiver.EnableReportExtraScrapeMetrics` feature gate instead. + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [ user ] diff --git a/receiver/prometheusreceiver/README.md b/receiver/prometheusreceiver/README.md index c6f8385d99f34..f8238507ab247 100644 --- a/receiver/prometheusreceiver/README.md +++ b/receiver/prometheusreceiver/README.md @@ -88,7 +88,7 @@ The prometheus receiver also supports additional top-level options: - **trim_metric_suffixes**: [**Experimental**] When set to true, this enables trimming unit and some counter type suffixes from metric names. For example, it would cause `singing_duration_seconds_total` to be trimmed to `singing_duration`. This can be useful when trying to restore the original metric names used in OpenTelemetry instrumentation.
Defaults to false. - **use_start_time_metric**: When set to true, this enables retrieving the start time of all counter metrics from the process_start_time_seconds metric. This is only correct if all counters on that endpoint started after the process start time, and the process is the only actor exporting the metric after the process started. It should not be used in "exporters" which export counters that may have started before the process itself. Use only if you know what you are doing, as this may result in incorrect rate calculations. Defaults to false. - **start_time_metric_regex**: The regular expression for the start time metric, and is only applied when use_start_time_metric is enabled. Defaults to process_start_time_seconds. -- **report_extra_scrape_metrics**: Extra Prometheus scrape metrics can be reported by setting this parameter to `true` +- **report_extra_scrape_metrics**: Extra Prometheus scrape metrics can be reported by setting this parameter to `true`. Deprecated; use the feature gate `receiver.prometheusreceiver.EnableReportExtraScrapeMetrics` instead. Example configuration: @@ -97,7 +97,6 @@ receivers: prometheus: trim_metric_suffixes: true use_start_time_metric: true - report_extra_scrape_metrics: true start_time_metric_regex: foo_bar_.* config: scrape_configs: @@ -182,6 +181,14 @@ More info about querying `/api/v1/` and the data format that is returned can be ## Feature gates +- `receiver.prometheusreceiver.EnableReportExtraScrapeMetrics`: Extra Prometheus scrape metrics + can be reported by setting this feature gate option. This replaces the deprecated + `report_extra_scrape_metrics` configuration flag: + +```shell +"--feature-gates=receiver.prometheusreceiver.EnableReportExtraScrapeMetrics" +``` + - `receiver.prometheusreceiver.UseCreatedMetric`: Start time for Summary, Histogram and Sum metrics can be retrieved from `_created` metrics. Currently, this behaviour is disabled by default. 
To enable it, use the following feature gate option: diff --git a/receiver/prometheusreceiver/config.go b/receiver/prometheusreceiver/config.go index efc6aee51cf3b..84305a9969ed1 100644 --- a/receiver/prometheusreceiver/config.go +++ b/receiver/prometheusreceiver/config.go @@ -39,6 +39,8 @@ type Config struct { StartTimeMetricRegex string `mapstructure:"start_time_metric_regex"` // ReportExtraScrapeMetrics - enables reporting of additional metrics for Prometheus client like scrape_body_size_bytes + // + // Deprecated: use the feature gate "receiver.prometheusreceiver.EnableReportExtraScrapeMetrics" instead. ReportExtraScrapeMetrics bool `mapstructure:"report_extra_scrape_metrics"` TargetAllocator configoptional.Optional[targetallocator.Config] `mapstructure:"target_allocator"` diff --git a/receiver/prometheusreceiver/factory.go b/receiver/prometheusreceiver/factory.go index 20c8064f92f1e..4af7b29e1bae1 100644 --- a/receiver/prometheusreceiver/factory.go +++ b/receiver/prometheusreceiver/factory.go @@ -40,6 +40,13 @@ var enableCreatedTimestampZeroIngestionGate = featuregate.GlobalRegistry().MustR " Created timestamps are injected as 0 valued samples when appropriate."), ) +var enableReportExtraScrapeMetricsGate = featuregate.GlobalRegistry().MustRegister( + "receiver.prometheusreceiver.EnableReportExtraScrapeMetrics", + featuregate.StageAlpha, + featuregate.WithRegisterDescription("Enables reporting of extra scrape metrics."+ + " Extra scrape metrics are metrics that are not scraped by Prometheus but are reported by the Prometheus server."), +) + // NewFactory creates a new Prometheus receiver factory. 
func NewFactory() receiver.Factory { return receiver.NewFactory( diff --git a/receiver/prometheusreceiver/metrics_receiver.go b/receiver/prometheusreceiver/metrics_receiver.go index 5ba159363a89c..8d8bd532bd957 100644 --- a/receiver/prometheusreceiver/metrics_receiver.go +++ b/receiver/prometheusreceiver/metrics_receiver.go @@ -227,7 +227,7 @@ func (r *pReceiver) initPrometheusComponents(ctx context.Context, logger *slog.L func (r *pReceiver) initScrapeOptions() *scrape.Options { opts := &scrape.Options{ PassMetadataInContext: true, - ExtraMetrics: r.cfg.ReportExtraScrapeMetrics, + ExtraMetrics: enableReportExtraScrapeMetricsGate.IsEnabled() || r.cfg.ReportExtraScrapeMetrics, HTTPClientOptions: []commonconfig.HTTPClientOption{ commonconfig.WithUserAgent(r.settings.BuildInfo.Command + "/" + r.settings.BuildInfo.Version), }, diff --git a/receiver/prometheusreceiver/metrics_receiver_report_extra_scrape_metrics_test.go b/receiver/prometheusreceiver/metrics_receiver_report_extra_scrape_metrics_test.go index fa3c77944fbd6..5da14dc2f60b9 100644 --- a/receiver/prometheusreceiver/metrics_receiver_report_extra_scrape_metrics_test.go +++ b/receiver/prometheusreceiver/metrics_receiver_report_extra_scrape_metrics_test.go @@ -13,6 +13,7 @@ import ( "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/receivertest" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/internal/metadata" ) @@ -48,6 +49,8 @@ func TestReportExtraScrapeMetrics(t *testing.T) { // starts prometheus receiver with custom config, retrieves metrics from MetricsSink func testScraperMetrics(t *testing.T, targets []*testData, reportExtraScrapeMetrics bool) { + defer testutil.SetFeatureGateForTest(t, enableReportExtraScrapeMetricsGate, reportExtraScrapeMetrics)() + ctx := t.Context() mp, cfg, err := setupMockPrometheus(targets...) 
require.NoErrorf(t, err, "Failed to create Prometheus config: %v", err) @@ -55,10 +58,9 @@ func testScraperMetrics(t *testing.T, targets []*testData, reportExtraScrapeMetr cms := new(consumertest.MetricsSink) receiver, err := newPrometheusReceiver(receivertest.NewNopSettings(metadata.Type), &Config{ - PrometheusConfig: cfg, - UseStartTimeMetric: false, - StartTimeMetricRegex: "", - ReportExtraScrapeMetrics: reportExtraScrapeMetrics, + PrometheusConfig: cfg, + UseStartTimeMetric: false, + StartTimeMetricRegex: "", }, cms) require.NoError(t, err, "Failed to create Prometheus receiver: %v", err) From b77d5b32decc90ecfab5bf020cfa79e50eebe332 Mon Sep 17 00:00:00 2001 From: Jade Guiton Date: Fri, 28 Nov 2025 12:17:12 +0100 Subject: [PATCH 35/41] [chore] Ensure "update-otel workflow failed" issues have logs from all job steps (#44583) #### Description When the `update-otel` workflow fails, an issue titled "update-otel workflow failed" is created, containing the last 100 lines of the CI's log output to help diagnose the problem. However, we've recently seen failures in steps that aren't covered by this log capture, which makes the output useless. (Example: #44580, where the failure was while creating the PR) This PR ensures we capture logs from all steps in the `update-otel` job. 
--- .github/workflows/update-otel.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/update-otel.yaml b/.github/workflows/update-otel.yaml index eaf7c47ac8942..3c88b02c00891 100644 --- a/.github/workflows/update-otel.yaml +++ b/.github/workflows/update-otel.yaml @@ -50,10 +50,12 @@ jobs: max_attempts: 2 retry_on: error command: | + exec > >(tee -a log.out) 2>&1 cd opentelemetry-collector-contrib make update-otel OTEL_STABLE_VERSION=${{ env.LAST_COMMIT }} OTEL_VERSION=${{ env.LAST_COMMIT }} - name: Push and create PR - run: | + run: | + exec > >(tee -a log.out) 2>&1 cd opentelemetry-collector-contrib git push --set-upstream origin ${{ env.BRANCH_NAME }} gh pr create --base main --title "[chore] Update core dependencies" --body "This PR updates the opentelemetry-collector modules to open-telemetry/opentelemetry-collector@${{ env.LAST_COMMIT }}" --draft From 0da45d08f4b75d15d514c1c82ff1a31a1c955f77 Mon Sep 17 00:00:00 2001 From: sreenathv Date: Fri, 28 Nov 2025 12:17:38 +0000 Subject: [PATCH 36/41] Updating readme for oracledb top_query and samples collection --- receiver/oracledbreceiver/README.md | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/receiver/oracledbreceiver/README.md b/receiver/oracledbreceiver/README.md index cce8c83d9cd89..5e157eed495c1 100644 --- a/receiver/oracledbreceiver/README.md +++ b/receiver/oracledbreceiver/README.md @@ -94,3 +94,26 @@ receivers: enabled: true ``` +## Enabling events + + +The following is a generic configuration that can be used for the default logs and metrics scraped +by the oracledb receiver.
+ +```yaml +receivers: + oracledb: + collection_interval: 10s # interval for overall collection + datasource: "oracle://otel:password@localhost:51521/XE" + events: + db.server.query_sample: + enabled: true + db.server.top_query: + enabled: true + top_query_collection: # this collection exports the most expensive queries as logs + max_query_sample_count: 1000 # maximum number of samples collected from db to filter the top N + top_query_count: 200 # The maximum number of queries (N) for which the metrics would be reported + collection_interval: 60s # collection interval for top query collection specifically + query_sample_collection: # this collection exports the currently executing queries (relative to the query time) as logs + max_rows_per_query: 100 # the maximum number of samples to be reported. +``` \ No newline at end of file From aa2e8437591a0ece1f6426755bc17efd77226189 Mon Sep 17 00:00:00 2001 From: sreenathv Date: Fri, 28 Nov 2025 13:20:10 +0000 Subject: [PATCH 37/41] change log --- ...racledb_top_query_collection_interval.yaml | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 .chloggen/oracledb_top_query_collection_interval.yaml diff --git a/.chloggen/oracledb_top_query_collection_interval.yaml b/.chloggen/oracledb_top_query_collection_interval.yaml new file mode 100644 index 0000000000000..b716b87da1497 --- /dev/null +++ b/.chloggen/oracledb_top_query_collection_interval.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. receiver/filelog) +component: receiver/oracledbreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Added independent collection interval config for Oracle top query metrics collection + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [44607] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] \ No newline at end of file From b0fcff4939a5cf613cdb8d1ef525dc6e8c808cee Mon Sep 17 00:00:00 2001 From: sreenathv Date: Fri, 28 Nov 2025 13:45:57 +0000 Subject: [PATCH 38/41] fix lint --- .chloggen/oracledb_top_query_collection_interval.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.chloggen/oracledb_top_query_collection_interval.yaml b/.chloggen/oracledb_top_query_collection_interval.yaml index b716b87da1497..564f954a253de 100644 --- a/.chloggen/oracledb_top_query_collection_interval.yaml +++ b/.chloggen/oracledb_top_query_collection_interval.yaml @@ -4,7 +4,7 @@ change_type: enhancement # The name of the component, or a single word describing the area of concern, (e.g. receiver/filelog) -component: receiver/oracledbreceiver +component: receiver/oracledb # A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). 
note: Added independent collection interval config for Oracle top query metrics collection From cdd4c00cb85a912fe6bc5c8fec62bad127fd8bc9 Mon Sep 17 00:00:00 2001 From: sreenathv Date: Fri, 28 Nov 2025 14:45:43 +0000 Subject: [PATCH 39/41] unit tests --- receiver/oracledbreceiver/scraper_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/receiver/oracledbreceiver/scraper_test.go b/receiver/oracledbreceiver/scraper_test.go index 11b1f3dc99fb0..941b88680d4b8 100644 --- a/receiver/oracledbreceiver/scraper_test.go +++ b/receiver/oracledbreceiver/scraper_test.go @@ -307,7 +307,7 @@ func TestScraper_ScrapeTopNLogs(t *testing.T) { assert.Equal(t, "db.server.top_query", logs.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).EventName()) assert.NoError(t, errs) - assert.True(t, scrpr.lastExecutionTimestamp.After(collectionTriggerTime), "lastExecutionTimestamp hasn't set after a successful collection") + assert.False(t, scrpr.lastExecutionTimestamp.After(collectionTriggerTime), "lastExecutionTimestamp hasn't set after a successful collection. 
LET, currentTime: "+scrpr.lastExecutionTimestamp.GoString()+" & "+collectionTriggerTime.String()) } }) } From 30318f171219bc7024e1aea7304ebde70dad1cd4 Mon Sep 17 00:00:00 2001 From: sreenathv Date: Fri, 28 Nov 2025 14:52:35 +0000 Subject: [PATCH 40/41] unit tests --- receiver/oracledbreceiver/scraper_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/receiver/oracledbreceiver/scraper_test.go b/receiver/oracledbreceiver/scraper_test.go index 941b88680d4b8..ec8592b4efcf5 100644 --- a/receiver/oracledbreceiver/scraper_test.go +++ b/receiver/oracledbreceiver/scraper_test.go @@ -307,7 +307,7 @@ func TestScraper_ScrapeTopNLogs(t *testing.T) { assert.Equal(t, "db.server.top_query", logs.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).EventName()) assert.NoError(t, errs) - assert.False(t, scrpr.lastExecutionTimestamp.After(collectionTriggerTime), "lastExecutionTimestamp hasn't set after a successful collection. LET, currentTime: "+scrpr.lastExecutionTimestamp.GoString()+" & "+collectionTriggerTime.String()) + assert.True(t, scrpr.lastExecutionTimestamp.After(collectionTriggerTime), "lastExecutionTimestamp hasn't set after a successful collection. 
LET, currentTime: "+scrpr.lastExecutionTimestamp.GoString()+" & "+collectionTriggerTime.String()) } }) } From 8424bc06d392efe8f8166ff524117faf68b3d271 Mon Sep 17 00:00:00 2001 From: sreenathv Date: Fri, 28 Nov 2025 15:15:17 +0000 Subject: [PATCH 41/41] fix unit tests --- receiver/oracledbreceiver/scraper_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/receiver/oracledbreceiver/scraper_test.go b/receiver/oracledbreceiver/scraper_test.go index ec8592b4efcf5..087be7c481e72 100644 --- a/receiver/oracledbreceiver/scraper_test.go +++ b/receiver/oracledbreceiver/scraper_test.go @@ -292,7 +292,7 @@ func TestScraper_ScrapeTopNLogs(t *testing.T) { }() require.NoError(t, err) expectedQueryPlanFile := filepath.Join("testdata", "expectedQueryTextAndPlanQuery.yaml") - collectionTriggerTime := time.Now() + assert.True(t, scrpr.lastExecutionTimestamp.IsZero(), "No value exists on lastExecutionTimestamp before any collection.") logs, err := scrpr.scrapeLogs(t.Context()) @@ -307,7 +307,7 @@ func TestScraper_ScrapeTopNLogs(t *testing.T) { assert.Equal(t, "db.server.top_query", logs.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).EventName()) assert.NoError(t, errs) - assert.True(t, scrpr.lastExecutionTimestamp.After(collectionTriggerTime), "lastExecutionTimestamp hasn't set after a successful collection. LET, currentTime: "+scrpr.lastExecutionTimestamp.GoString()+" & "+collectionTriggerTime.String()) + assert.False(t, scrpr.lastExecutionTimestamp.IsZero(), "lastExecutionTimestamp hasn't set after a successful collection.") } }) }