diff --git a/.github/workflows/test-build-deploy.yml b/.github/workflows/test-build-deploy.yml index 9d5fb1a20f5..6b3be24bc8b 100644 --- a/.github/workflows/test-build-deploy.yml +++ b/.github/workflows/test-build-deploy.yml @@ -42,6 +42,8 @@ jobs: run: make BUILD_IN_CONTAINER=false check-doc - name: Check White Noise. run: make BUILD_IN_CONTAINER=false check-white-noise + - name: Check Modernize + run: make BUILD_IN_CONTAINER=false check-modernize test: runs-on: ubuntu-24.04 diff --git a/CHANGELOG.md b/CHANGELOG.md index 46beb0a7113..61eee8a099e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ * [FEATURE] Querier: Support for configuring query optimizers and enabling XFunctions in the Thanos engine. #6873 * [FEATURE] Query Frontend: Add support /api/v1/format_query API for formatting queries. #6893 * [FEATURE] Query Frontend: Add support for /api/v1/parse_query API (experimental) to parse a PromQL expression and return it as a JSON-formatted AST (abstract syntax tree). #6978 +* [ENHANCEMENT] Modernize the entire codebase using the gopls `modernize` tool. #7005 * [ENHANCEMENT] Overrides Exporter: Expose all fields that can be converted to float64. Also, the label value `max_local_series_per_metric` got renamed to `max_series_per_metric`, and `max_local_series_per_user` got renamed to `max_series_per_user`. #6979 * [ENHANCEMENT] Ingester: Add `cortex_ingester_tsdb_wal_replay_unknown_refs_total` and `cortex_ingester_tsdb_wbl_replay_unknown_refs_total` metrics to track unknown series references during wal/wbl replaying. #6945 * [ENHANCEMENT] Ruler: Emit an error message when the rule synchronization fails. #6902 diff --git a/Makefile b/Makefile index ae4f3ab7cc4..773462516cd 100644 --- a/Makefile +++ b/Makefile @@ -126,7 +126,7 @@ GOVOLUMES= -v $(shell pwd)/.cache:/go/cache:delegated,z \ -v $(shell pwd)/.pkg:/go/pkg:delegated,z \ -v $(shell pwd):/go/src/github.com/cortexproject/cortex:delegated,z -exes $(EXES) protos $(PROTO_GOS) lint test cover shell mod-check check-protos web-build web-pre web-deploy doc: build-image/$(UPTODATE) +exes $(EXES) protos $(PROTO_GOS) lint test cover shell mod-check check-protos web-build web-pre web-deploy doc check-modernize: build-image/$(UPTODATE) @mkdir -p $(shell pwd)/.pkg @mkdir -p $(shell pwd)/.cache @echo @@ -308,6 +308,12 @@ clean-white-noise: check-white-noise: clean-white-noise @git diff --exit-code --quiet -- '*.md' || (echo "Please remove trailing whitespaces running 'make clean-white-noise'" && false) +modernize: + go run golang.org/x/tools/gopls/internal/analysis/modernize/cmd/modernize@v0.20.0 -fix ./... + +check-modernize: modernize + @git diff --exit-code -- . 
|| (echo "Please modernize running 'make modernize'" && false) + web-serve: cd website && hugo --config config.toml --minify -v server diff --git a/cmd/cortex/main_test.go b/cmd/cortex/main_test.go index 5c719491297..2a5d01a61a6 100644 --- a/cmd/cortex/main_test.go +++ b/cmd/cortex/main_test.go @@ -225,7 +225,6 @@ func TestExpandEnv(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.in, func(t *testing.T) { _ = os.Setenv("y", "y") output := expandEnv([]byte(test.in)) @@ -263,7 +262,6 @@ func TestParseConfigFileParameter(t *testing.T) { {"--config.expand-env --opt1 --config.file=foo", "foo", true}, } for _, test := range tests { - test := test t.Run(test.args, func(t *testing.T) { args := strings.Split(test.args, " ") configFile, expandENV := parseConfigFileParameter(args) diff --git a/cmd/thanosconvert/main.go b/cmd/thanosconvert/main.go index bec41a00270..24d2dbc4b69 100644 --- a/cmd/thanosconvert/main.go +++ b/cmd/thanosconvert/main.go @@ -79,7 +79,7 @@ func main() { } -func fatal(msg string, args ...interface{}) { +func fatal(msg string, args ...any) { fmt.Fprintf(os.Stderr, msg+"\n", args...) os.Exit(1) } diff --git a/integration/e2e/composite_service.go b/integration/e2e/composite_service.go index 0dd840db004..2ed6e1ef58a 100644 --- a/integration/e2e/composite_service.go +++ b/integration/e2e/composite_service.go @@ -84,7 +84,7 @@ func (s *CompositeHTTPService) SumMetrics(metricNames []string, opts ...MetricsO return nil, fmt.Errorf("unexpected mismatching sum metrics results (got %d, expected %d)", len(partials), len(sums)) } - for i := 0; i < len(sums); i++ { + for i := range sums { sums[i] += partials[i] } } diff --git a/integration/e2e/logger.go b/integration/e2e/logger.go index 5152ed5e739..1a25c09adaa 100644 --- a/integration/e2e/logger.go +++ b/integration/e2e/logger.go @@ -29,7 +29,7 @@ func NewLogger(w io.Writer) *Logger { } } -func (l *Logger) Log(keyvals ...interface{}) error { +func (l *Logger) Log(keyvals ...any) error { log := strings.Builder{} log.WriteString(time.Now().Format("15:04:05")) diff --git a/integration/e2e/metrics.go b/integration/e2e/metrics.go index 988880e7941..143b2fa73b4 100644 --- a/integration/e2e/metrics.go +++ b/integration/e2e/metrics.go @@ -2,6 +2,7 @@ package e2e import ( "math" + "slices" io_prometheus_client "github.com/prometheus/client_model/go" ) @@ -143,12 +144,7 @@ func EqualsAmong(values ...float64) func(sums ...float64) bool { if len(sums) != 1 { panic("equals among: expected one value") } - for _, value := range values { - if sums[0] == value { - return true - } - } - return false + return slices.Contains(values, sums[0]) } } diff --git a/integration/e2e/scenario.go b/integration/e2e/scenario.go index 19938fedcd1..3535e77deb7 100644 --- a/integration/e2e/scenario.go +++ b/integration/e2e/scenario.go @@ -163,7 +163,7 @@ func (s *Scenario) shutdown() { "--filter", fmt.Sprintf("network=%s", s.networkName), ); err == nil { - for _, containerID := range strings.Split(string(out), "\n") { + for containerID := range strings.SplitSeq(string(out), "\n") { containerID = strings.TrimSpace(containerID) if containerID == "" { continue diff --git a/integration/e2e/service.go b/integration/e2e/service.go index bc99429e1b1..c3fb7ad0fb2 100644 --- a/integration/e2e/service.go +++ b/integration/e2e/service.go @@ -503,7 +503,7 @@ type LinePrefixLogger struct { } func (w *LinePrefixLogger) Write(p []byte) (n int, err error) { - for _, line := range strings.Split(string(p), "\n") { + for line := range strings.SplitSeq(string(p), "\n") 
{ // Skip empty lines line = strings.TrimSpace(line) if line == "" { @@ -698,7 +698,7 @@ func (s *HTTPService) WaitRemovedMetric(metricName string, opts ...MetricsOption func parseDockerIPv4Port(out string) (int, error) { // The "docker port" output may be multiple lines if both IPv4 and IPv6 are supported, // so we need to parse each line. - for _, line := range strings.Split(out, "\n") { + for line := range strings.SplitSeq(out, "\n") { matches := dockerIPv4PortPattern.FindStringSubmatch(strings.TrimSpace(line)) if len(matches) != 2 { continue diff --git a/integration/e2e/util.go b/integration/e2e/util.go index a7c164fea3b..c7af4141574 100644 --- a/integration/e2e/util.go +++ b/integration/e2e/util.go @@ -2,6 +2,7 @@ package e2e import ( "context" + "maps" "math" "math/rand" "net/http" @@ -63,9 +64,7 @@ func MergeFlagsWithoutRemovingEmpty(inputs ...map[string]string) map[string]stri output := map[string]string{} for _, input := range inputs { - for name, value := range input { - output[name] = value - } + maps.Copy(output, input) } return output @@ -211,7 +210,7 @@ func GenerateSeriesWithSamples( startTMillis := tsMillis samples := make([]prompb.Sample, numSamples) - for i := 0; i < numSamples; i++ { + for i := range numSamples { scrapeJitter := rand.Int63n(10) + 1 // add a jitter to simulate real-world scenarios, refer to: https://github.com/prometheus/prometheus/issues/13213 samples[i] = prompb.Sample{ Timestamp: startTMillis + scrapeJitter, @@ -288,11 +287,11 @@ func CreateNHBlock( }() app := h.Appender(ctx) - for i := 0; i < len(series); i++ { + for i := range series { num := random.Intn(i + 1) var ref storage.SeriesRef start := RandRange(rnd, mint, maxt) - for j := 0; j < numNHSamples; j++ { + for j := range numNHSamples { if num%2 == 0 { // append float histogram ref, err = app.AppendHistogram(ref, series[i], start, nil, tsdbutil.GenerateTestFloatHistogram(int64(i+j))) @@ -372,11 +371,11 @@ func CreateBlock( }() app := h.Appender(ctx) - for i := 0; i < len(series); i++ { + for i := range series { var ref storage.SeriesRef start := RandRange(rnd, mint, maxt) - for j := 0; j < numSamples; j++ { + for j := range numSamples { ref, err = app.Append(ref, series[i], start, float64(i+j)) if err != nil { if rerr := app.Rollback(); rerr != nil { @@ -519,7 +518,7 @@ func GenerateV2SeriesWithSamples( startTMillis := tsMillis samples := make([]writev2.Sample, numSamples) - for i := 0; i < numSamples; i++ { + for i := range numSamples { scrapeJitter := rand.Int63n(10) + 1 // add a jitter to simulate real-world scenarios, refer to: https://github.com/prometheus/prometheus/issues/13213 samples[i] = writev2.Sample{ Timestamp: startTMillis + scrapeJitter, diff --git a/pkg/alertmanager/alertmanager_http.go b/pkg/alertmanager/alertmanager_http.go index 1b27ef7b9ee..2a313b3700b 100644 --- a/pkg/alertmanager/alertmanager_http.go +++ b/pkg/alertmanager/alertmanager_http.go @@ -96,12 +96,12 @@ type StatusHandler struct { // ServeHTTP serves the status of the alertmanager. 
func (s StatusHandler) ServeHTTP(w http.ResponseWriter, _ *http.Request) { - var clusterInfo map[string]interface{} + var clusterInfo map[string]any if s.am.peer != nil { clusterInfo = s.am.peer.Info() } err := statusTemplate.Execute(w, struct { - ClusterInfo map[string]interface{} + ClusterInfo map[string]any }{ ClusterInfo: clusterInfo, }) diff --git a/pkg/alertmanager/alertmanager_http_test.go b/pkg/alertmanager/alertmanager_http_test.go index 987221593ab..126de01695a 100644 --- a/pkg/alertmanager/alertmanager_http_test.go +++ b/pkg/alertmanager/alertmanager_http_test.go @@ -1,7 +1,6 @@ package alertmanager import ( - "context" "io" "net/http/httptest" "testing" @@ -14,8 +13,7 @@ import ( ) func TestMultitenantAlertmanager_GetStatusHandler(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() var peer *cluster.Peer { logger := promslog.NewNopLogger() diff --git a/pkg/alertmanager/alertmanager_ring_test.go b/pkg/alertmanager/alertmanager_ring_test.go index 3e4d460252e..ec1f3008fa3 100644 --- a/pkg/alertmanager/alertmanager_ring_test.go +++ b/pkg/alertmanager/alertmanager_ring_test.go @@ -45,7 +45,6 @@ func TestIsHealthyForAlertmanagerOperations(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { actual := testData.instance.IsHealthy(RingOp, testData.timeout, time.Now()) diff --git a/pkg/alertmanager/alertmanager_test.go b/pkg/alertmanager/alertmanager_test.go index c4ed3064fa2..54d4cd45846 100644 --- a/pkg/alertmanager/alertmanager_test.go +++ b/pkg/alertmanager/alertmanager_test.go @@ -48,7 +48,7 @@ func TestSilencesLimits(t *testing.T) { } // create silences up to maxSilencesCount - for i := 0; i < maxSilencesCount; i++ { + for range maxSilencesCount { err := am.silences.Set(createSilences()) require.NoError(t, err) } @@ -136,7 +136,7 @@ route: now := time.Now() - for i := 0; i < alertGroups; i++ { + for i := range alertGroups { alertName := model.LabelValue(fmt.Sprintf("Alert-%d", i)) inputAlerts := []*types.Alert{ @@ -174,7 +174,7 @@ route: } // Give it some time, as alerts are sent to dispatcher asynchronously. - test.Poll(t, 3*time.Second, nil, func() interface{} { + test.Poll(t, 3*time.Second, nil, func() any { return testutil.GatherAndCompare(reg, strings.NewReader(fmt.Sprintf(` # HELP alertmanager_dispatcher_aggregation_group_limit_reached_total Number of times when dispatcher failed to create new aggregation group due to limit. 
# TYPE alertmanager_dispatcher_aggregation_group_limit_reached_total counter diff --git a/pkg/alertmanager/alertstore/bucketclient/bucket_client.go b/pkg/alertmanager/alertstore/bucketclient/bucket_client.go index 4252f6703bf..7a2d3dad2bd 100644 --- a/pkg/alertmanager/alertstore/bucketclient/bucket_client.go +++ b/pkg/alertmanager/alertstore/bucketclient/bucket_client.go @@ -75,7 +75,7 @@ func (s *BucketAlertStore) GetAlertConfigs(ctx context.Context, userIDs []string cfgs = make(map[string]alertspb.AlertConfigDesc, len(userIDs)) ) - err := concurrency.ForEach(ctx, concurrency.CreateJobsFromStrings(userIDs), fetchConcurrency, func(ctx context.Context, job interface{}) error { + err := concurrency.ForEach(ctx, concurrency.CreateJobsFromStrings(userIDs), fetchConcurrency, func(ctx context.Context, job any) error { userID := job.(string) cfg, uBucket, err := s.getAlertConfig(ctx, userID) diff --git a/pkg/alertmanager/alertstore/config.go b/pkg/alertmanager/alertstore/config.go index bca00768d7d..5d32e6dd9e1 100644 --- a/pkg/alertmanager/alertstore/config.go +++ b/pkg/alertmanager/alertstore/config.go @@ -2,6 +2,7 @@ package alertstore import ( "flag" + "slices" "github.com/cortexproject/cortex/pkg/alertmanager/alertstore/configdb" "github.com/cortexproject/cortex/pkg/alertmanager/alertstore/local" @@ -28,10 +29,5 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { // IsFullStateSupported returns if the given configuration supports access to FullState objects. func (cfg *Config) IsFullStateSupported() bool { - for _, backend := range bucket.SupportedBackends { - if cfg.Backend == backend { - return true - } - } - return false + return slices.Contains(bucket.SupportedBackends, cfg.Backend) } diff --git a/pkg/alertmanager/alertstore/store_test.go b/pkg/alertmanager/alertstore/store_test.go index fd7fb2816a2..2796b6ed041 100644 --- a/pkg/alertmanager/alertstore/store_test.go +++ b/pkg/alertmanager/alertstore/store_test.go @@ -21,7 +21,7 @@ var ( ) func TestAlertStore_ListAllUsers(t *testing.T) { - runForEachAlertStore(t, func(t *testing.T, store AlertStore, m *mockBucket, client interface{}) { + runForEachAlertStore(t, func(t *testing.T, store AlertStore, m *mockBucket, client any) { ctx := context.Background() user1Cfg := alertspb.AlertConfigDesc{User: "user-1", RawConfig: "content-1"} user2Cfg := alertspb.AlertConfigDesc{User: "user-2", RawConfig: "content-2"} @@ -46,7 +46,7 @@ func TestAlertStore_ListAllUsers(t *testing.T) { } func TestAlertStore_SetAndGetAlertConfig(t *testing.T) { - runForEachAlertStore(t, func(t *testing.T, store AlertStore, m *mockBucket, client interface{}) { + runForEachAlertStore(t, func(t *testing.T, store AlertStore, m *mockBucket, client any) { ctx := context.Background() user1Cfg := alertspb.AlertConfigDesc{User: "user-1", RawConfig: "content-1"} user2Cfg := alertspb.AlertConfigDesc{User: "user-2", RawConfig: "content-2"} @@ -84,7 +84,7 @@ func TestAlertStore_SetAndGetAlertConfig(t *testing.T) { } func TestStore_GetAlertConfigs(t *testing.T) { - runForEachAlertStore(t, func(t *testing.T, store AlertStore, m *mockBucket, client interface{}) { + runForEachAlertStore(t, func(t *testing.T, store AlertStore, m *mockBucket, client any) { ctx := context.Background() user1Cfg := alertspb.AlertConfigDesc{User: "user-1", RawConfig: "content-1"} user2Cfg := alertspb.AlertConfigDesc{User: "user-2", RawConfig: "content-2"} @@ -129,7 +129,7 @@ func TestStore_GetAlertConfigs(t *testing.T) { } func TestAlertStore_DeleteAlertConfig(t *testing.T) { - runForEachAlertStore(t, func(t 
*testing.T, store AlertStore, m *mockBucket, client interface{}) { + runForEachAlertStore(t, func(t *testing.T, store AlertStore, m *mockBucket, client any) { ctx := context.Background() user1Cfg := alertspb.AlertConfigDesc{User: "user-1", RawConfig: "content-1"} user2Cfg := alertspb.AlertConfigDesc{User: "user-2", RawConfig: "content-2"} @@ -169,14 +169,14 @@ func TestAlertStore_DeleteAlertConfig(t *testing.T) { }) } -func runForEachAlertStore(t *testing.T, testFn func(t *testing.T, store AlertStore, b *mockBucket, client interface{})) { +func runForEachAlertStore(t *testing.T, testFn func(t *testing.T, store AlertStore, b *mockBucket, client any)) { bucketClient := objstore.NewInMemBucket() mBucketClient := &mockBucket{Bucket: bucketClient} bucketStore := bucketclient.NewBucketAlertStore(mBucketClient, nil, log.NewNopLogger()) stores := map[string]struct { store AlertStore - client interface{} + client any }{ "bucket": {store: bucketStore, client: mBucketClient}, } @@ -188,7 +188,7 @@ func runForEachAlertStore(t *testing.T, testFn func(t *testing.T, store AlertSto } } -func objectExists(bucketClient interface{}, key string) (bool, error) { +func objectExists(bucketClient any, key string) (bool, error) { if typed, ok := bucketClient.(objstore.Bucket); ok { return typed.Exists(context.Background(), key) } diff --git a/pkg/alertmanager/api.go b/pkg/alertmanager/api.go index f546bbd4cea..cbac5bd89c9 100644 --- a/pkg/alertmanager/api.go +++ b/pkg/alertmanager/api.go @@ -283,7 +283,7 @@ func (am *MultitenantAlertmanager) ListAllConfigs(w http.ResponseWriter, r *http } done := make(chan struct{}) - iter := make(chan interface{}) + iter := make(chan any) go func() { util.StreamWriteYAMLResponse(w, iter, logger) @@ -321,7 +321,7 @@ func (am *MultitenantAlertmanager) ListAllConfigs(w http.ResponseWriter, r *http // validateAlertmanagerConfig recursively scans the input config looking for data types for which // we have a specific validation and, whenever encountered, it runs their validation. Returns the // first error or nil if validation succeeds. -func validateAlertmanagerConfig(cfg interface{}) error { +func validateAlertmanagerConfig(cfg any) error { v := reflect.ValueOf(cfg) t := v.Type() diff --git a/pkg/alertmanager/api_test.go b/pkg/alertmanager/api_test.go index e70af952478..8c0a097d84c 100644 --- a/pkg/alertmanager/api_test.go +++ b/pkg/alertmanager/api_test.go @@ -867,7 +867,7 @@ receivers: func TestValidateAlertmanagerConfig(t *testing.T) { tests := map[string]struct { - input interface{} + input any expected error }{ "*HTTPClientConfig": { diff --git a/pkg/alertmanager/distributor_test.go b/pkg/alertmanager/distributor_test.go index beb2277e74b..fed453c3a89 100644 --- a/pkg/alertmanager/distributor_test.go +++ b/pkg/alertmanager/distributor_test.go @@ -287,7 +287,7 @@ func TestDistributor_DistributeRequest(t *testing.T) { // Since the response is sent as soon as the quorum is reached, when we // reach this point the 3rd AM may not have received the request yet. // To avoid flaky test we retry until we hit the desired state within a reasonable timeout. 
- test.Poll(t, time.Second, c.expectedTotalCalls, func() interface{} { + test.Poll(t, time.Second, c.expectedTotalCalls, func() any { totalReqCount := 0 for _, a := range ams { reqCount := a.requestsCount(route) @@ -306,7 +306,7 @@ func TestDistributor_DistributeRequest(t *testing.T) { func prepare(t *testing.T, numAM, numHappyAM, replicationFactor int, responseBody []byte) (*Distributor, []*mockAlertmanager, func()) { ams := []*mockAlertmanager{} remainingFailure := atomic.NewInt32(int32(numAM - numHappyAM)) - for i := 0; i < numAM; i++ { + for i := range numAM { ams = append(ams, newMockAlertmanager(i, remainingFailure, responseBody)) } @@ -329,7 +329,7 @@ func prepare(t *testing.T, numAM, numHappyAM, replicationFactor int, responseBod t.Cleanup(func() { assert.NoError(t, closer.Close()) }) err := kvStore.CAS(context.Background(), RingKey, - func(_ interface{}) (interface{}, bool, error) { + func(_ any) (any, bool, error) { return &ring.Desc{ Ingesters: amDescs, }, true, nil @@ -346,7 +346,7 @@ func prepare(t *testing.T, numAM, numHappyAM, replicationFactor int, responseBod }, RingNameForServer, RingKey, nil, nil) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), amRing)) - test.Poll(t, time.Second, numAM, func() interface{} { + test.Poll(t, time.Second, numAM, func() any { return amRing.InstancesCount() }) diff --git a/pkg/alertmanager/multitenant.go b/pkg/alertmanager/multitenant.go index 47b02d36d1d..1a3e2b3c078 100644 --- a/pkg/alertmanager/multitenant.go +++ b/pkg/alertmanager/multitenant.go @@ -1153,7 +1153,7 @@ func (am *MultitenantAlertmanager) ReadFullStateForUser(ctx context.Context, use // Note that the jobs swallow the errors - this is because we want to give each replica a chance to respond. jobs := concurrency.CreateJobsFromStrings(addrs) - err = concurrency.ForEach(ctx, jobs, len(jobs), func(ctx context.Context, job interface{}) error { + err = concurrency.ForEach(ctx, jobs, len(jobs), func(ctx context.Context, job any) error { addr := job.(string) level.Debug(am.logger).Log("msg", "contacting replica for full state", "user", userID, "addr", addr) diff --git a/pkg/alertmanager/multitenant_test.go b/pkg/alertmanager/multitenant_test.go index acbf9eb66aa..fe00b3c94e4 100644 --- a/pkg/alertmanager/multitenant_test.go +++ b/pkg/alertmanager/multitenant_test.go @@ -195,7 +195,7 @@ receivers: reg := prometheus.NewPedanticRegistry() am, err := createMultitenantAlertmanager(cfg, nil, nil, store, nil, nil, log.NewNopLogger(), reg) require.NoError(t, err) - for i := 0; i < 5; i++ { + for range 5 { err = am.loadAndSyncConfigs(context.Background(), reasonPeriodic) require.NoError(t, err) require.Len(t, am.alertmanagers, 2) @@ -1128,7 +1128,7 @@ func TestMultitenantAlertmanager_InitialSyncWithSharding(t *testing.T) { // Setup the initial instance state in the ring. 
if tt.existing { - require.NoError(t, ringStore.CAS(ctx, RingKey, func(in interface{}) (interface{}, bool, error) { + require.NoError(t, ringStore.CAS(ctx, RingKey, func(in any) (any, bool, error) { ringDesc := ring.GetOrCreateRingDesc(in) ringDesc.AddIngester(amConfig.ShardingRing.InstanceID, amConfig.ShardingRing.InstanceAddr, "", tt.initialTokens, tt.initialState, time.Now()) return ringDesc, true, nil @@ -1529,7 +1529,7 @@ func TestMultitenantAlertmanager_SyncOnRingTopologyChanges(t *testing.T) { am, err := createMultitenantAlertmanager(amConfig, nil, nil, alertStore, ringStore, nil, log.NewNopLogger(), reg) require.NoError(t, err) - require.NoError(t, ringStore.CAS(ctx, RingKey, func(in interface{}) (interface{}, bool, error) { + require.NoError(t, ringStore.CAS(ctx, RingKey, func(in any) (any, bool, error) { ringDesc := ring.GetOrCreateRingDesc(in) tt.setupRing(ringDesc) return ringDesc, true, nil @@ -1545,7 +1545,7 @@ func TestMultitenantAlertmanager_SyncOnRingTopologyChanges(t *testing.T) { assert.Equal(t, float64(1), metrics.GetSumOfCounters("cortex_alertmanager_sync_configs_total")) // Change the ring topology. - require.NoError(t, ringStore.CAS(ctx, RingKey, func(in interface{}) (interface{}, bool, error) { + require.NoError(t, ringStore.CAS(ctx, RingKey, func(in any) (any, bool, error) { ringDesc := ring.GetOrCreateRingDesc(in) tt.updateRing(ringDesc) return ringDesc, true, nil @@ -1556,7 +1556,7 @@ func TestMultitenantAlertmanager_SyncOnRingTopologyChanges(t *testing.T) { if tt.expected { expectedSyncs++ } - test.Poll(t, 3*time.Second, float64(expectedSyncs), func() interface{} { + test.Poll(t, 3*time.Second, float64(expectedSyncs), func() any { metrics := regs.BuildMetricFamiliesPerUser() return metrics.GetSumOfCounters("cortex_alertmanager_sync_configs_total") }) @@ -1584,7 +1584,7 @@ func TestMultitenantAlertmanager_RingLifecyclerShouldAutoForgetUnhealthyInstance defer services.StopAndAwaitTerminated(ctx, am) //nolint:errcheck tg := ring.NewRandomTokenGenerator() - require.NoError(t, ringStore.CAS(ctx, RingKey, func(in interface{}) (interface{}, bool, error) { + require.NoError(t, ringStore.CAS(ctx, RingKey, func(in any) (any, bool, error) { ringDesc := ring.GetOrCreateRingDesc(in) instance := ringDesc.AddIngester(unhealthyInstanceID, "127.0.0.1", "", tg.GenerateTokens(ringDesc, unhealthyInstanceID, "", RingNumTokens, true), ring.ACTIVE, time.Now()) instance.Timestamp = time.Now().Add(-(ringAutoForgetUnhealthyPeriods + 1) * heartbeatTimeout).Unix() @@ -1593,7 +1593,7 @@ func TestMultitenantAlertmanager_RingLifecyclerShouldAutoForgetUnhealthyInstance return ringDesc, true, nil })) - test.Poll(t, time.Second, false, func() interface{} { + test.Poll(t, time.Second, false, func() any { d, err := ringStore.Get(ctx, RingKey) if err != nil { return err @@ -2066,7 +2066,7 @@ func TestAlertmanager_StateReplicationWithSharding_InitialSyncFromPeers(t *testi } // 2.c. Wait for the silence replication to be attempted; note this is asynchronous. 
{ - test.Poll(t, 5*time.Second, float64(1), func() interface{} { + test.Poll(t, 5*time.Second, float64(1), func() any { metrics := registries.BuildMetricFamiliesPerUser() return metrics.GetSumOfCounters("cortex_alertmanager_state_replication_total") }) diff --git a/pkg/alertmanager/rate_limited_notifier_test.go b/pkg/alertmanager/rate_limited_notifier_test.go index 1d35c9d99a7..28de624cb5a 100644 --- a/pkg/alertmanager/rate_limited_notifier_test.go +++ b/pkg/alertmanager/rate_limited_notifier_test.go @@ -44,7 +44,7 @@ func runNotifications(t *testing.T, rateLimitedNotifier *rateLimitedNotifier, co success := 0 rateLimited := 0 - for i := 0; i < count; i++ { + for range count { retry, err := rateLimitedNotifier.Notify(context.Background(), &types.Alert{}) switch err { diff --git a/pkg/api/api.go b/pkg/api/api.go index ebe64440f9c..e124fec3e68 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -45,7 +45,7 @@ import ( // DistributorPushWrapper wraps around a push. It is similar to middleware.Interface. type DistributorPushWrapper func(next push.Func) push.Func -type ConfigHandler func(actualCfg interface{}, defaultCfg interface{}) http.HandlerFunc +type ConfigHandler func(actualCfg any, defaultCfg any) http.HandlerFunc type Config struct { ResponseCompression bool `yaml:"response_compression_enabled"` @@ -262,7 +262,7 @@ func (a *API) RegisterAlertmanager(am *alertmanager.MultitenantAlertmanager, tar } // RegisterAPI registers the standard endpoints associated with a running Cortex. -func (a *API) RegisterAPI(httpPathPrefix string, actualCfg interface{}, defaultCfg interface{}) { +func (a *API) RegisterAPI(httpPathPrefix string, actualCfg any, defaultCfg any) { a.indexPage.AddLink(SectionAdminEndpoints, "/config", "Current Config (including the default values)") a.indexPage.AddLink(SectionAdminEndpoints, "/config?mode=diff", "Current Config (show only values that differ from the defaults)") diff --git a/pkg/api/api_test.go b/pkg/api/api_test.go index df2ec239f03..f864199ee37 100644 --- a/pkg/api/api_test.go +++ b/pkg/api/api_test.go @@ -21,7 +21,7 @@ const ( type FakeLogger struct{} -func (fl *FakeLogger) Log(keyvals ...interface{}) error { +func (fl *FakeLogger) Log(keyvals ...any) error { return nil } @@ -187,12 +187,11 @@ func Benchmark_Compression(b *testing.B) { req.Header.Set(acceptEncodingHeader, "gzip") b.ReportAllocs() - b.ResetTimer() // Reusing the array to read the body and avoid allocation on the test encRespBody := make([]byte, len(respBody)) - for i := 0; i < b.N; i++ { + for b.Loop() { resp, err := client.Do(req) require.NoError(b, err) diff --git a/pkg/api/handlers.go b/pkg/api/handlers.go index 54a55318542..2b30e8aa587 100644 --- a/pkg/api/handlers.go +++ b/pkg/api/handlers.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "html/template" + "maps" "net/http" "path" "sync" @@ -70,9 +71,7 @@ func (pc *IndexPageContent) GetContent() map[string]map[string]string { result := map[string]map[string]string{} for k, v := range pc.content { sm := map[string]string{} - for smK, smV := range v { - sm[smK] = smV - } + maps.Copy(sm, v) result[k] = sm } return result @@ -100,7 +99,7 @@ var indexPageTemplate = ` func indexHandler(httpPathPrefix string, content *IndexPageContent) http.HandlerFunc { templ := template.New("main") - templ.Funcs(map[string]interface{}{ + templ.Funcs(map[string]any{ "AddPathPrefix": func(link string) string { return path.Join(httpPathPrefix, link) }, @@ -115,16 +114,16 @@ func indexHandler(httpPathPrefix string, content *IndexPageContent) http.Handler } } 
-func (cfg *Config) configHandler(actualCfg interface{}, defaultCfg interface{}) http.HandlerFunc { +func (cfg *Config) configHandler(actualCfg any, defaultCfg any) http.HandlerFunc { if cfg.CustomConfigHandler != nil { return cfg.CustomConfigHandler(actualCfg, defaultCfg) } return DefaultConfigHandler(actualCfg, defaultCfg) } -func DefaultConfigHandler(actualCfg interface{}, defaultCfg interface{}) http.HandlerFunc { +func DefaultConfigHandler(actualCfg any, defaultCfg any) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - var output interface{} + var output any switch r.URL.Query().Get("mode") { case "diff": defaultCfgObj, err := util.YAMLMarshalUnmarshal(defaultCfg) diff --git a/pkg/api/handlers_test.go b/pkg/api/handlers_test.go index 9b8b7930683..cf3b7ee1a75 100644 --- a/pkg/api/handlers_test.go +++ b/pkg/api/handlers_test.go @@ -92,7 +92,7 @@ func TestConfigDiffHandler(t *testing.T) { name string expectedStatusCode int expectedBody string - actualConfig func() interface{} + actualConfig func() any }{ { name: "no config parameters overridden", @@ -101,7 +101,7 @@ func TestConfigDiffHandler(t *testing.T) { }, { name: "slice changed", - actualConfig: func() interface{} { + actualConfig: func() any { c := newDefaultDiffConfigMock() c.MySlice = append(c.MySlice, "value3") return c @@ -114,7 +114,7 @@ func TestConfigDiffHandler(t *testing.T) { }, { name: "string in nested struct changed", - actualConfig: func() interface{} { + actualConfig: func() any { c := newDefaultDiffConfigMock() c.MyNestedStruct.MyString = "string2" return c @@ -125,7 +125,7 @@ func TestConfigDiffHandler(t *testing.T) { }, { name: "bool in nested struct changed", - actualConfig: func() interface{} { + actualConfig: func() any { c := newDefaultDiffConfigMock() c.MyNestedStruct.MyBool = true return c @@ -136,7 +136,7 @@ func TestConfigDiffHandler(t *testing.T) { }, { name: "test invalid input", - actualConfig: func() interface{} { + actualConfig: func() any { c := "x" return &c }, @@ -148,7 +148,7 @@ func TestConfigDiffHandler(t *testing.T) { defaultCfg := newDefaultDiffConfigMock() t.Run(tc.name, func(t *testing.T) { - var actualCfg interface{} + var actualCfg any if tc.actualConfig != nil { actualCfg = tc.actualConfig() } else { @@ -173,7 +173,7 @@ func TestConfigDiffHandler(t *testing.T) { func TestConfigOverrideHandler(t *testing.T) { cfg := &Config{ - CustomConfigHandler: func(_ interface{}, _ interface{}) http.HandlerFunc { + CustomConfigHandler: func(_ any, _ any) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { _, err := w.Write([]byte("config")) assert.NoError(t, err) diff --git a/pkg/api/queryapi/compression.go b/pkg/api/queryapi/compression.go index 7dd6fcbacab..b7c4f0ce008 100644 --- a/pkg/api/queryapi/compression.go +++ b/pkg/api/queryapi/compression.go @@ -53,8 +53,8 @@ func (c *compressedResponseWriter) Close() { // Constructs a new compressedResponseWriter based on client request headers. 
func newCompressedResponseWriter(writer http.ResponseWriter, req *http.Request) *compressedResponseWriter { - encodings := strings.Split(req.Header.Get(acceptEncodingHeader), ",") - for _, encoding := range encodings { + encodings := strings.SplitSeq(req.Header.Get(acceptEncodingHeader), ",") + for encoding := range encodings { switch strings.TrimSpace(encoding) { case zstdEncoding: encoder, err := zstd.NewWriter(writer) diff --git a/pkg/api/queryapi/compression_test.go b/pkg/api/queryapi/compression_test.go index bcd36a3728c..ce949b63ee9 100644 --- a/pkg/api/queryapi/compression_test.go +++ b/pkg/api/queryapi/compression_test.go @@ -110,7 +110,7 @@ func TestNewCompressedResponseWriter_MultipleEncodings(t *testing.T) { tests := []struct { header string expectEnc string - expectType interface{} + expectType any }{ {"snappy, gzip", snappyEncoding, &snappy.Writer{}}, {"unknown, gzip", gzipEncoding, &gzip.Writer{}}, diff --git a/pkg/api/queryapi/query_api.go b/pkg/api/queryapi/query_api.go index ef9ef4e2801..83eed69ec8b 100644 --- a/pkg/api/queryapi/query_api.go +++ b/pkg/api/queryapi/query_api.go @@ -250,7 +250,7 @@ func (q *QueryAPI) Wrap(f apiFunc) http.HandlerFunc { }.ServeHTTP } -func (q *QueryAPI) respond(w http.ResponseWriter, req *http.Request, data interface{}, warnings annotations.Annotations, query string) { +func (q *QueryAPI) respond(w http.ResponseWriter, req *http.Request, data any, warnings annotations.Annotations, query string) { warn, info := warnings.AsStrings(query, 10, 10) resp := &v1.Response{ diff --git a/pkg/api/queryapi/util.go b/pkg/api/queryapi/util.go index 9d85b8a96c7..e9e43e8cb27 100644 --- a/pkg/api/queryapi/util.go +++ b/pkg/api/queryapi/util.go @@ -89,7 +89,7 @@ func returnAPIError(err error) *apiError { } type apiFuncResult struct { - data interface{} + data any err *apiError warnings annotations.Annotations finalizer func() diff --git a/pkg/chunk/cache/background.go b/pkg/chunk/cache/background.go index bfdfb748d89..1e74fe50129 100644 --- a/pkg/chunk/cache/background.go +++ b/pkg/chunk/cache/background.go @@ -83,10 +83,7 @@ const keysPerBatch = 100 // Store writes keys for the cache in the background. 
func (c *backgroundCache) Store(ctx context.Context, keys []string, bufs [][]byte) { for len(keys) > 0 { - num := keysPerBatch - if num > len(keys) { - num = len(keys) - } + num := min(keysPerBatch, len(keys)) bgWrite := backgroundWrite{ keys: keys[:num], diff --git a/pkg/chunk/cache/cache_test.go b/pkg/chunk/cache/cache_test.go index 5209b3e1b21..5ed7314caac 100644 --- a/pkg/chunk/cache/cache_test.go +++ b/pkg/chunk/cache/cache_test.go @@ -22,7 +22,7 @@ func fillCache(t *testing.T, cache cache.Cache) ([]string, []chunkenc.Chunk) { keys := []string{} bufs := [][]byte{} chunks := []chunkenc.Chunk{} - for i := 0; i < 111; i++ { + for i := range 111 { ts := model.TimeFromUnix(int64(i * chunkLen)) promChunk := chunkenc.NewXORChunk() appender, err := promChunk.Appender() @@ -41,7 +41,7 @@ func fillCache(t *testing.T, cache cache.Cache) ([]string, []chunkenc.Chunk) { } func testCacheSingle(t *testing.T, cache cache.Cache, keys []string, chunks []chunkenc.Chunk) { - for i := 0; i < 100; i++ { + for range 100 { index := rand.Intn(len(keys)) key := keys[index] @@ -73,7 +73,7 @@ func testCacheMultiple(t *testing.T, cache cache.Cache, keys []string, chunks [] } func testCacheMiss(t *testing.T, cache cache.Cache) { - for i := 0; i < 100; i++ { + for range 100 { key := strconv.Itoa(rand.Int()) // arbitrary key which should fail: no chunk key is a single integer found, bufs, missing := cache.Fetch(context.Background(), []string{key}) require.Empty(t, found) diff --git a/pkg/chunk/cache/fifo_cache_test.go b/pkg/chunk/cache/fifo_cache_test.go index 50aee975a3c..3515d077357 100644 --- a/pkg/chunk/cache/fifo_cache_test.go +++ b/pkg/chunk/cache/fifo_cache_test.go @@ -44,7 +44,7 @@ func TestFifoCacheEviction(t *testing.T) { // Check put / get works keys := []string{} values := [][]byte{} - for i := 0; i < cnt; i++ { + for i := range cnt { key := fmt.Sprintf("%02d", i) value := make([]byte, len(key)) copy(value, key) @@ -65,7 +65,7 @@ func TestFifoCacheEviction(t *testing.T) { assert.Equal(t, testutil.ToFloat64(c.staleGets), float64(0)) assert.Equal(t, testutil.ToFloat64(c.memoryBytes), float64(cnt*sizeOf(itemTemplate))) - for i := 0; i < cnt; i++ { + for i := range cnt { key := fmt.Sprintf("%02d", i) value, ok := c.Get(ctx, key) require.True(t, ok) @@ -107,7 +107,7 @@ func TestFifoCacheEviction(t *testing.T) { assert.Equal(t, testutil.ToFloat64(c.staleGets), float64(0)) assert.Equal(t, testutil.ToFloat64(c.memoryBytes), float64(cnt*sizeOf(itemTemplate))) - for i := 0; i < cnt-evicted; i++ { + for i := range cnt - evicted { _, ok := c.Get(ctx, fmt.Sprintf("%02d", i)) require.False(t, ok) } @@ -145,7 +145,7 @@ func TestFifoCacheEviction(t *testing.T) { for i := cnt; i < cnt+evicted; i++ { value, ok := c.Get(ctx, fmt.Sprintf("%02d", i)) require.True(t, ok) - require.Equal(t, []byte(fmt.Sprintf("%02d", i*2)), value) + require.Equal(t, fmt.Appendf(nil, "%02d", i*2), value) } assert.Equal(t, testutil.ToFloat64(c.entriesAdded), float64(3)) diff --git a/pkg/chunk/cache/memcached_client.go b/pkg/chunk/cache/memcached_client.go index d1b167e26bd..49c3edd4d79 100644 --- a/pkg/chunk/cache/memcached_client.go +++ b/pkg/chunk/cache/memcached_client.go @@ -180,7 +180,7 @@ func (c *memcachedClient) dialViaCircuitBreaker(_ context.Context, network, addr } c.Unlock() - conn, err := cb.Execute(func() (interface{}, error) { + conn, err := cb.Execute(func() (any, error) { return net.DialTimeout(network, address, c.cbTimeout) }) if err != nil { diff --git a/pkg/chunk/cache/memcached_client_selector_test.go 
b/pkg/chunk/cache/memcached_client_selector_test.go index 69305670b67..c4364bcb762 100644 --- a/pkg/chunk/cache/memcached_client_selector_test.go +++ b/pkg/chunk/cache/memcached_client_selector_test.go @@ -44,7 +44,7 @@ func TestMemcachedJumpHashSelector_PickSever(t *testing.T) { // to make sure different IPs were discovered during SetServers distribution := make(map[string]int) - for i := 0; i < 100; i++ { + for i := range 100 { key := fmt.Sprintf("key-%d", i) addr, err := s.PickServer(key) require.NoError(t, err) diff --git a/pkg/chunk/cache/memcached_test.go b/pkg/chunk/cache/memcached_test.go index f15c27333cc..ebb2581e9ea 100644 --- a/pkg/chunk/cache/memcached_test.go +++ b/pkg/chunk/cache/memcached_test.go @@ -43,19 +43,19 @@ func testMemcache(t *testing.T, memcache *cache.Memcached) { bufs := make([][]byte, 0, numKeys) // Insert 1000 keys skipping all multiples of 5. - for i := 0; i < numKeys; i++ { + for i := range numKeys { keysIncMissing = append(keysIncMissing, fmt.Sprint(i)) if i%5 == 0 { continue } keys = append(keys, fmt.Sprint(i)) - bufs = append(bufs, []byte(fmt.Sprint(i))) + bufs = append(bufs, fmt.Append(nil, i)) } memcache.Store(ctx, keys, bufs) found, bufs, missing := memcache.Fetch(ctx, keysIncMissing) - for i := 0; i < numKeys; i++ { + for i := range numKeys { if i%5 == 0 { require.Equal(t, fmt.Sprint(i), missing[0]) missing = missing[1:] @@ -118,17 +118,17 @@ func testMemcacheFailing(t *testing.T, memcache *cache.Memcached) { keys := make([]string, 0, numKeys) bufs := make([][]byte, 0, numKeys) // Insert 1000 keys skipping all multiples of 5. - for i := 0; i < numKeys; i++ { + for i := range numKeys { keysIncMissing = append(keysIncMissing, fmt.Sprint(i)) if i%5 == 0 { continue } keys = append(keys, fmt.Sprint(i)) - bufs = append(bufs, []byte(fmt.Sprint(i))) + bufs = append(bufs, fmt.Append(nil, i)) } memcache.Store(ctx, keys, bufs) - for i := 0; i < 10; i++ { + for range 10 { found, bufs, missing := memcache.Fetch(ctx, keysIncMissing) require.Equal(t, len(found), len(bufs)) @@ -182,9 +182,9 @@ func testMemcachedStopping(t *testing.T, memcache *cache.Memcached) { ctx := context.Background() keys := make([]string, 0, numKeys) bufs := make([][]byte, 0, numKeys) - for i := 0; i < numKeys; i++ { + for i := range numKeys { keys = append(keys, fmt.Sprint(i)) - bufs = append(bufs, []byte(fmt.Sprint(i))) + bufs = append(bufs, fmt.Append(nil, i)) } memcache.Store(ctx, keys, bufs) diff --git a/pkg/chunk/cache/redis_cache_test.go b/pkg/chunk/cache/redis_cache_test.go index d0f7c7ca356..154e688066d 100644 --- a/pkg/chunk/cache/redis_cache_test.go +++ b/pkg/chunk/cache/redis_cache_test.go @@ -35,7 +35,7 @@ func TestRedisCache(t *testing.T) { require.Len(t, found, nHit) require.Len(t, missed, 0) - for i := 0; i < nHit; i++ { + for i := range nHit { require.Equal(t, keys[i], found[i]) require.Equal(t, bufs[i], data[i]) } @@ -45,7 +45,7 @@ func TestRedisCache(t *testing.T) { require.Len(t, found, 0) require.Len(t, missed, nMiss) - for i := 0; i < nMiss; i++ { + for i := range nMiss { require.Equal(t, miss[i], missed[i]) } } diff --git a/pkg/compactor/blocks_cleaner.go b/pkg/compactor/blocks_cleaner.go index dd957d264ce..8e1e9a60551 100644 --- a/pkg/compactor/blocks_cleaner.go +++ b/pkg/compactor/blocks_cleaner.go @@ -475,7 +475,7 @@ func (c *BlocksCleaner) deleteUserMarkedForDeletion(ctx context.Context, userLog } c.tenantBucketIndexLastUpdate.DeleteLabelValues(userID) - var blocksToDelete []interface{} + var blocksToDelete []any err := userBucket.Iter(ctx, "", func(name string) 
error { if err := ctx.Err(); err != nil { return err @@ -492,7 +492,7 @@ func (c *BlocksCleaner) deleteUserMarkedForDeletion(ctx context.Context, userLog } var deletedBlocks, failed atomic.Int64 - err = concurrency.ForEach(ctx, blocksToDelete, defaultDeleteBlocksConcurrency, func(ctx context.Context, job interface{}) error { + err = concurrency.ForEach(ctx, blocksToDelete, defaultDeleteBlocksConcurrency, func(ctx context.Context, job any) error { blockID := job.(ulid.ULID) err := block.Delete(ctx, userLogger, userBucket, blockID) if err != nil { @@ -697,7 +697,7 @@ func (c *BlocksCleaner) cleanUser(ctx context.Context, userLogger log.Logger, us // Delete blocks marked for deletion. We iterate over a copy of deletion marks because // we'll need to manipulate the index (removing blocks which get deleted). begin = time.Now() - blocksToDelete := make([]interface{}, 0, len(idx.BlockDeletionMarks)) + blocksToDelete := make([]any, 0, len(idx.BlockDeletionMarks)) var mux sync.Mutex for _, mark := range idx.BlockDeletionMarks.Clone() { if time.Since(mark.GetDeletionTime()).Seconds() <= c.cfg.DeletionDelay.Seconds() { @@ -709,7 +709,7 @@ func (c *BlocksCleaner) cleanUser(ctx context.Context, userLogger log.Logger, us // Concurrently deletes blocks marked for deletion, and removes blocks from index. begin = time.Now() - _ = concurrency.ForEach(ctx, blocksToDelete, defaultDeleteBlocksConcurrency, func(ctx context.Context, job interface{}) error { + _ = concurrency.ForEach(ctx, blocksToDelete, defaultDeleteBlocksConcurrency, func(ctx context.Context, job any) error { blockID := job.(ulid.ULID) if err := block.Delete(ctx, userLogger, userBucket, blockID); err != nil { @@ -884,7 +884,7 @@ func (c *BlocksCleaner) iterPartitionGroups(ctx context.Context, userBucket objs // and index are updated accordingly. func (c *BlocksCleaner) cleanUserPartialBlocks(ctx context.Context, userID string, partials map[ulid.ULID]error, idx *bucketindex.Index, userBucket objstore.InstrumentedBucket, userLogger log.Logger) { // Collect all blocks with missing meta.json into buffered channel. - blocks := make([]interface{}, 0, len(partials)) + blocks := make([]any, 0, len(partials)) for blockID, blockErr := range partials { // We can safely delete only blocks which are partial because the meta.json is missing. @@ -896,7 +896,7 @@ func (c *BlocksCleaner) cleanUserPartialBlocks(ctx context.Context, userID strin var mux sync.Mutex - _ = concurrency.ForEach(ctx, blocks, defaultDeleteBlocksConcurrency, func(ctx context.Context, job interface{}) error { + _ = concurrency.ForEach(ctx, blocks, defaultDeleteBlocksConcurrency, func(ctx context.Context, job any) error { blockID := job.(ulid.ULID) // We can safely delete only partial blocks with a deletion mark. 
err := metadata.ReadMarker(ctx, userLogger, userBucket, blockID.String(), &metadata.DeletionMark{}) diff --git a/pkg/compactor/blocks_cleaner_test.go b/pkg/compactor/blocks_cleaner_test.go index 787d377d63b..9b13d7c1b91 100644 --- a/pkg/compactor/blocks_cleaner_test.go +++ b/pkg/compactor/blocks_cleaner_test.go @@ -50,7 +50,6 @@ func TestBlocksCleaner(t *testing.T) { {concurrency: 2}, {concurrency: 10}, } { - options := options t.Run(options.String(), func(t *testing.T) { t.Parallel() diff --git a/pkg/compactor/compactor.go b/pkg/compactor/compactor.go index e9ac396cf2a..65fa95fea07 100644 --- a/pkg/compactor/compactor.go +++ b/pkg/compactor/compactor.go @@ -9,6 +9,7 @@ import ( "math/rand" "os" "path/filepath" + "slices" "strings" "time" @@ -353,10 +354,8 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { } func (cfg *Config) Validate(limits validation.Limits) error { - for _, blockRange := range cfg.BlockRanges { - if blockRange == 0 { - return errors.New("compactor block range period cannot be zero") - } + if slices.Contains(cfg.BlockRanges, 0) { + return errors.New("compactor block range period cannot be zero") } // Each block range period should be divisible by the previous one. for i := 1; i < len(cfg.BlockRanges); i++ { @@ -366,7 +365,7 @@ func (cfg *Config) Validate(limits validation.Limits) error { } // Make sure a valid sharding strategy is being used - if !util.StringsContain(supportedShardingStrategies, cfg.ShardingStrategy) { + if !slices.Contains(supportedShardingStrategies, cfg.ShardingStrategy) { return errInvalidShardingStrategy } @@ -377,7 +376,7 @@ func (cfg *Config) Validate(limits validation.Limits) error { } // Make sure a valid compaction strategy is being used - if !util.StringsContain(supportedCompactionStrategies, cfg.CompactionStrategy) { + if !slices.Contains(supportedCompactionStrategies, cfg.CompactionStrategy) { return errInvalidCompactionStrategy } @@ -1280,12 +1279,7 @@ func (c *Compactor) isCausedByPermissionDenied(err error) bool { cause = errors.Unwrap(cause) } if multiErr, ok := cause.(errutil.NonNilMultiRootError); ok { - for _, err := range multiErr { - if c.isPermissionDeniedErr(err) { - return true - } - } - return false + return slices.ContainsFunc(multiErr, c.isPermissionDeniedErr) } return c.isPermissionDeniedErr(cause) } diff --git a/pkg/compactor/compactor_paritioning_test.go b/pkg/compactor/compactor_paritioning_test.go index bbb875dad37..593e94d2aec 100644 --- a/pkg/compactor/compactor_paritioning_test.go +++ b/pkg/compactor/compactor_paritioning_test.go @@ -215,7 +215,7 @@ func TestPartitionCompactor_SkipCompactionWhenCmkError(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -236,7 +236,7 @@ func TestPartitionCompactor_ShouldDoNothingOnNoUserBlocks(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. 
- cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -312,7 +312,7 @@ func TestPartitionCompactor_ShouldRetryCompactionOnFailureWhileDiscoveringUsersF require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until all retry attempts have completed. - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsFailed) }) @@ -411,7 +411,7 @@ func TestPartitionCompactor_ShouldIncrementCompactionErrorIfFailedToCompactASing require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until all retry attempts have completed. - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsFailed) }) @@ -479,7 +479,7 @@ func TestPartitionCompactor_ShouldCompactAndRemoveUserFolder(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -557,7 +557,7 @@ func TestPartitionCompactor_ShouldIterateOverUsersAndRunCompaction(t *testing.T) require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -693,7 +693,7 @@ func TestPartitionCompactor_ShouldNotCompactBlocksMarkedForDeletion(t *testing.T require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -834,7 +834,7 @@ func TestPartitionCompactor_ShouldNotCompactBlocksMarkedForSkipCompact(t *testin require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -902,7 +902,7 @@ func TestPartitionCompactor_ShouldNotCompactBlocksForUsersMarkedForDeletion(t *t require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -1013,7 +1013,7 @@ func TestPartitionCompactor_ShouldSkipOutOrOrderBlocks(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), c) //nolint:errcheck // Wait until a run has completed. 
- cortex_testutil.Poll(t, 20*time.Second, true, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, true, func() any { if _, err := os.Stat(path.Join(dir, "no-compact-mark.json")); err == nil { return true } @@ -1109,7 +1109,7 @@ func TestPartitionCompactor_ShouldCompactAllUsersOnShardingEnabledButOnlyOneInst require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -1214,7 +1214,7 @@ func TestPartitionCompactor_ShouldCompactOnlyUsersOwnedByTheInstanceOnShardingEn // Wait until a run has been completed on each compactor for _, c := range compactors { - cortex_testutil.Poll(t, 60*time.Second, true, func() interface{} { + cortex_testutil.Poll(t, 60*time.Second, true, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) >= 1 }) } @@ -1365,7 +1365,7 @@ func TestPartitionCompactor_ShouldCompactOnlyShardsOwnedByTheInstanceOnShardingE // Wait until a run has been completed on each compactor for _, c := range compactors { - cortex_testutil.Poll(t, 60*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 60*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) } @@ -1594,7 +1594,7 @@ func TestPartitionCompactor_DeleteLocalSyncFiles(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c1)) // Wait until a run has been completed on first compactor. This happens as soon as compactor starts. - cortex_testutil.Poll(t, 20*time.Second, true, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, true, func() any { return prom_testutil.ToFloat64(c1.CompactionRunsCompleted) >= 1 }) @@ -1605,7 +1605,7 @@ func TestPartitionCompactor_DeleteLocalSyncFiles(t *testing.T) { // Now start second compactor, and wait until it runs compaction. require.NoError(t, services.StartAndAwaitRunning(context.Background(), c2)) - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c2.CompactionRunsCompleted) }) @@ -1712,7 +1712,7 @@ func TestPartitionCompactor_ShouldNotHangIfPlannerReturnNothing(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -1766,7 +1766,7 @@ func TestPartitionCompactor_ShouldNotFailCompactionIfAccessDeniedErrDuringMetaSy require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -1820,7 +1820,7 @@ func TestPartitionCompactor_ShouldNotFailCompactionIfAccessDeniedErrReturnedFrom require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. 
- cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go index 19bb759f009..5724c946990 100644 --- a/pkg/compactor/compactor_test.go +++ b/pkg/compactor/compactor_test.go @@ -193,7 +193,7 @@ func TestCompactor_SkipCompactionWhenCmkError(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -214,7 +214,7 @@ func TestCompactor_ShouldDoNothingOnNoUserBlocks(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -291,7 +291,7 @@ func TestCompactor_ShouldRetryCompactionOnFailureWhileDiscoveringUsersFromBucket require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until all retry attempts have completed. - cortex_testutil.Poll(t, time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsFailed) }) @@ -386,7 +386,7 @@ func TestCompactor_ShouldIncrementCompactionErrorIfFailedToCompactASingleTenant( require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until all retry attempts have completed. - cortex_testutil.Poll(t, time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsFailed) }) @@ -450,7 +450,7 @@ func TestCompactor_ShouldCompactAndRemoveUserFolder(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -519,7 +519,7 @@ func TestCompactor_ShouldIterateOverUsersAndRunCompaction(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -654,7 +654,7 @@ func TestCompactor_ShouldNotCompactBlocksMarkedForDeletion(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. 
- cortex_testutil.Poll(t, time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -787,7 +787,7 @@ func TestCompactor_ShouldNotCompactBlocksMarkedForSkipCompact(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) - cortex_testutil.Poll(t, time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -851,7 +851,7 @@ func TestCompactor_ShouldNotCompactBlocksForUsersMarkedForDeletion(t *testing.T) require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -961,7 +961,7 @@ func TestCompactor_ShouldSkipOutOrOrderBlocks(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), c) //nolint:errcheck // Wait until a run has completed. - cortex_testutil.Poll(t, 5*time.Second, true, func() interface{} { + cortex_testutil.Poll(t, 5*time.Second, true, func() any { if _, err := os.Stat(path.Join(dir, "no-compact-mark.json")); err == nil { return true } @@ -1047,7 +1047,7 @@ func TestCompactor_ShouldCompactAllUsersOnShardingEnabledButOnlyOneInstanceRunni require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, 5*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 5*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -1150,7 +1150,7 @@ func TestCompactor_ShouldCompactOnlyUsersOwnedByTheInstanceOnShardingEnabledAndM // Wait until a run has been completed on each compactor for _, c := range compactors { - cortex_testutil.Poll(t, 120*time.Second, true, func() interface{} { + cortex_testutil.Poll(t, 120*time.Second, true, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) >= 1 }) } @@ -1294,7 +1294,7 @@ func TestCompactor_ShouldCompactOnlyShardsOwnedByTheInstanceOnShardingEnabledWit // Wait until a run has been completed on each compactor for _, c := range compactors { - cortex_testutil.Poll(t, 60*time.Second, 2.0, func() interface{} { + cortex_testutil.Poll(t, 60*time.Second, 2.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) } @@ -1511,7 +1511,7 @@ func removeIgnoredLogs(input []string) []string { executionIDRe := regexp.MustCompile(`\s?execution_id=\S+`) main: - for i := 0; i < len(input); i++ { + for i := range input { log := input[i] // Remove any duration from logs. @@ -1821,7 +1821,7 @@ func TestCompactor_DeleteLocalSyncFiles(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c1)) // Wait until a run has been completed on first compactor. This happens as soon as compactor starts. - cortex_testutil.Poll(t, 10*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 10*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c1.CompactionRunsCompleted) }) @@ -1832,7 +1832,7 @@ func TestCompactor_DeleteLocalSyncFiles(t *testing.T) { // Now start second compactor, and wait until it runs compaction. 
require.NoError(t, services.StartAndAwaitRunning(context.Background(), c2)) - cortex_testutil.Poll(t, 10*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 10*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c2.CompactionRunsCompleted) }) @@ -1918,7 +1918,7 @@ func TestCompactor_ShouldNotTreatInterruptionsAsErrors(t *testing.T) { }, nil) require.NoError(t, services.StartAndAwaitRunning(ctx, c)) - cortex_testutil.Poll(t, 1*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 1*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsInterrupted) }) @@ -1991,7 +1991,7 @@ func TestCompactor_ShouldNotFailCompactionIfAccessDeniedErrDuringMetaSync(t *tes require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -2042,7 +2042,7 @@ func TestCompactor_ShouldNotFailCompactionIfAccessDeniedErrReturnedFromBucket(t require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -2088,7 +2088,7 @@ func TestCompactor_FailedWithRetriableError(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) - cortex_testutil.Poll(t, 1*time.Second, 2.0, func() interface{} { + cortex_testutil.Poll(t, 1*time.Second, 2.0, func() any { return prom_testutil.ToFloat64(c.compactorMetrics.compactionErrorsCount.WithLabelValues("user-1", retriableError)) }) @@ -2142,7 +2142,7 @@ func TestCompactor_FailedWithHaltError(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) - cortex_testutil.Poll(t, 1*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 1*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.compactorMetrics.compactionErrorsCount.WithLabelValues("user-1", haltError)) }) @@ -2173,7 +2173,7 @@ func TestCompactor_RingLifecyclerShouldAutoForgetUnhealthyInstances(t *testing.T // Create two compactors var compactors []*Compactor - for i := 0; i < 2; i++ { + for i := range 2 { // Setup config cfg := prepareConfig() @@ -2209,11 +2209,11 @@ func TestCompactor_RingLifecyclerShouldAutoForgetUnhealthyInstances(t *testing.T require.NoError(t, services.StartAndAwaitRunning(context.Background(), compactor2)) // Wait until a run has completed. 
- cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(compactor2.CompactionRunsCompleted) }) - cortex_testutil.Poll(t, 5000*time.Millisecond, true, func() interface{} { + cortex_testutil.Poll(t, 5000*time.Millisecond, true, func() any { healthy, unhealthy, _ := compactor.ring.GetAllInstanceDescs(ring.Reporting) return len(healthy) == 2 && len(unhealthy) == 0 }) @@ -2222,7 +2222,7 @@ func TestCompactor_RingLifecyclerShouldAutoForgetUnhealthyInstances(t *testing.T // compactor service while UnregisterOnShutdown is false require.NoError(t, services.StopAndAwaitTerminated(context.Background(), compactor2)) - cortex_testutil.Poll(t, 5000*time.Millisecond, true, func() interface{} { + cortex_testutil.Poll(t, 5000*time.Millisecond, true, func() any { healthy, unhealthy, _ := compactor.ring.GetAllInstanceDescs(ring.Reporting) return len(healthy) == 1 && len(unhealthy) == 0 }) @@ -2282,7 +2282,7 @@ func TestCompactor_GetShardSizeForUser(t *testing.T) { // Create compactors var compactors []*Compactor - for i := 0; i < 5; i++ { + for i := range 5 { // Setup config cfg := prepareConfig() @@ -2317,7 +2317,7 @@ func TestCompactor_GetShardSizeForUser(t *testing.T) { // Wait until a run has been completed on each compactor for _, c := range compactors { - cortex_testutil.Poll(t, 120*time.Second, true, func() interface{} { + cortex_testutil.Poll(t, 120*time.Second, true, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) >= 1 }) } @@ -2366,7 +2366,7 @@ func TestCompactor_GetShardSizeForUser(t *testing.T) { // Wait until a run has been completed on each compactor for _, c := range compactors2 { - cortex_testutil.Poll(t, 120*time.Second, true, func() interface{} { + cortex_testutil.Poll(t, 120*time.Second, true, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) >= 1 }) } diff --git a/pkg/compactor/partition_compaction_grouper.go b/pkg/compactor/partition_compaction_grouper.go index 711df2d0a39..2d308fb6360 100644 --- a/pkg/compactor/partition_compaction_grouper.go +++ b/pkg/compactor/partition_compaction_grouper.go @@ -409,7 +409,7 @@ func (g *PartitionCompactionGrouper) partitionBlockGroup(group blocksGroupWithPa } partitions := make([]Partition, partitionCount) - for partitionID := 0; partitionID < partitionCount; partitionID++ { + for partitionID := range partitionCount { partitionedGroup := partitionedGroups[partitionID] blockIDs := make([]ulid.ULID, len(partitionedGroup.blocks)) for i, m := range partitionedGroup.blocks { @@ -468,10 +468,7 @@ func (g *PartitionCompactionGrouper) calculatePartitionCount(group blocksGroupWi if seriesCountLimit > 0 && totalSeriesCount > seriesCountLimit { partitionNumberBasedOnSeries = g.findNearestPartitionNumber(float64(totalSeriesCount), float64(seriesCountLimit)) } - partitionNumber := partitionNumberBasedOnIndex - if partitionNumberBasedOnSeries > partitionNumberBasedOnIndex { - partitionNumber = partitionNumberBasedOnSeries - } + partitionNumber := max(partitionNumberBasedOnSeries, partitionNumberBasedOnIndex) level.Info(g.logger).Log("msg", "calculated partition number for group", "partitioned_group_id", groupHash, "partition_number", partitionNumber, "total_index_size", totalIndexSizeInBytes, "index_size_limit", indexSizeLimit, "total_series_count", totalSeriesCount, "series_count_limit", seriesCountLimit, "group", group.String()) return partitionNumber } diff --git a/pkg/compactor/partition_compaction_grouper_test.go 
b/pkg/compactor/partition_compaction_grouper_test.go index 774ae23f11f..6ca1ee88779 100644 --- a/pkg/compactor/partition_compaction_grouper_test.go +++ b/pkg/compactor/partition_compaction_grouper_test.go @@ -1,7 +1,6 @@ package compactor import ( - "context" "encoding/json" "fmt" "path" @@ -2080,12 +2079,8 @@ func TestPartitionCompactionGrouper_GenerateCompactionJobs(t *testing.T) { b.fixPartitionInfo(t, userID) } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - ingestionReplicationFactor := 1 - if testCase.ingestionReplicationFactor > 1 { - ingestionReplicationFactor = testCase.ingestionReplicationFactor - } + ctx := t.Context() + ingestionReplicationFactor := max(testCase.ingestionReplicationFactor, 1) g := NewPartitionCompactionGrouper( ctx, nil, diff --git a/pkg/compactor/sharded_posting_test.go b/pkg/compactor/sharded_posting_test.go index c277922fe0a..50f8bd557c3 100644 --- a/pkg/compactor/sharded_posting_test.go +++ b/pkg/compactor/sharded_posting_test.go @@ -44,7 +44,7 @@ func TestShardPostingAndSymbolBasedOnPartitionID(t *testing.T) { expectedSymbols[ConstLabelName] = false expectedSymbols[ConstLabelValue] = false expectedSeriesCount := 10 - for i := 0; i < expectedSeriesCount; i++ { + for range expectedSeriesCount { labelValue := strconv.Itoa(r.Int()) series = append(series, labels.FromStrings(metricName.Name, metricName.Value, ConstLabelName, ConstLabelValue, TestLabelName, labelValue)) expectedSymbols[TestLabelName] = false @@ -60,7 +60,7 @@ func TestShardPostingAndSymbolBasedOnPartitionID(t *testing.T) { } }() seriesCount := 0 - for partitionID := 0; partitionID < partitionCount; partitionID++ { + for partitionID := range partitionCount { ir, err := index.NewFileReader(filepath.Join(tmpdir, blockID.String(), "index"), index.DecodePostingsRaw) closers = append(closers, ir) require.NoError(t, err) diff --git a/pkg/compactor/shuffle_sharding_grouper_test.go b/pkg/compactor/shuffle_sharding_grouper_test.go index 9cc0bb25d99..3ff91003755 100644 --- a/pkg/compactor/shuffle_sharding_grouper_test.go +++ b/pkg/compactor/shuffle_sharding_grouper_test.go @@ -2,7 +2,6 @@ package compactor import ( "bytes" - "context" "encoding/json" "path" "testing" @@ -380,8 +379,7 @@ func TestShuffleShardingGrouper_Groups(t *testing.T) { return testData.noCompactBlocks } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() g := NewShuffleShardingGrouper( ctx, nil, diff --git a/pkg/configs/api/api.go b/pkg/configs/api/api.go index 5ae41eade7c..56cf15bc943 100644 --- a/pkg/configs/api/api.go +++ b/pkg/configs/api/api.go @@ -350,8 +350,8 @@ func parseConfigFormat(v string, defaultFormat string) string { if v == "" { return defaultFormat } - parts := strings.Split(v, ",") - for _, part := range parts { + parts := strings.SplitSeq(v, ",") + for part := range parts { mimeType, _, err := mime.ParseMediaType(part) if err != nil { continue diff --git a/pkg/configs/db/postgres/postgres.go b/pkg/configs/db/postgres/postgres.go index 7ebd464bf8c..11d4ce233ac 100644 --- a/pkg/configs/db/postgres/postgres.go +++ b/pkg/configs/db/postgres/postgres.go @@ -43,9 +43,9 @@ type DB struct { } type dbProxy interface { - Exec(query string, args ...interface{}) (sql.Result, error) - Query(query string, args ...interface{}) (*sql.Rows, error) - QueryRow(query string, args ...interface{}) *sql.Row + Exec(query string, args ...any) (sql.Result, error) + Query(query string, args ...any) (*sql.Rows, error) + QueryRow(query string, args ...any) *sql.Row 
Prepare(query string) (*sql.Stmt, error) } diff --git a/pkg/configs/db/traced.go b/pkg/configs/db/traced.go index 6f7bf7e0141..962bfebefc8 100644 --- a/pkg/configs/db/traced.go +++ b/pkg/configs/db/traced.go @@ -15,7 +15,7 @@ type traced struct { d DB } -func (t traced) trace(name string, args ...interface{}) { +func (t traced) trace(name string, args ...any) { level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("%s: %#v", name, args)) } diff --git a/pkg/configs/userconfig/config.go b/pkg/configs/userconfig/config.go index 70b6ed70187..d218c9788eb 100644 --- a/pkg/configs/userconfig/config.go +++ b/pkg/configs/userconfig/config.go @@ -53,7 +53,7 @@ func (v RuleFormatVersion) MarshalJSON() ([]byte, error) { } // MarshalYAML implements yaml.Marshaler. -func (v RuleFormatVersion) MarshalYAML() (interface{}, error) { +func (v RuleFormatVersion) MarshalYAML() (any, error) { switch v { case RuleFormatV1: return yaml.Marshal("1") @@ -82,7 +82,7 @@ func (v *RuleFormatVersion) UnmarshalJSON(data []byte) error { } // UnmarshalYAML implements yaml.Unmarshaler. -func (v *RuleFormatVersion) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (v *RuleFormatVersion) UnmarshalYAML(unmarshal func(any) error) error { var s string if err := unmarshal(&s); err != nil { return err @@ -129,7 +129,7 @@ func (c Config) MarshalJSON() ([]byte, error) { } // MarshalYAML implements yaml.Marshaler. -func (c Config) MarshalYAML() (interface{}, error) { +func (c Config) MarshalYAML() (any, error) { compat := &configCompat{ RulesFiles: c.RulesConfig.Files, RuleFormatVersion: c.RulesConfig.FormatVersion, @@ -158,7 +158,7 @@ func (c *Config) UnmarshalJSON(data []byte) error { } // UnmarshalYAML implements yaml.Unmarshaler. -func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *Config) UnmarshalYAML(unmarshal func(any) error) error { compat := configCompat{} if err := unmarshal(&compat); err != nil { return errors.WithStack(err) diff --git a/pkg/cortex/cortex.go b/pkg/cortex/cortex.go index f50fcf26e17..e7575abdcee 100644 --- a/pkg/cortex/cortex.go +++ b/pkg/cortex/cortex.go @@ -8,6 +8,7 @@ import ( "net/http" "os" "reflect" + "slices" "strings" "github.com/go-kit/log" @@ -250,7 +251,7 @@ func (c *Config) Validate(log log.Logger) error { } func (c *Config) isModuleEnabled(m string) bool { - return util.StringsContain(c.Target, m) + return slices.Contains(c.Target, m) } // validateYAMLEmptyNodes ensure that no empty node has been specified in the YAML config file. 
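Two of the rewrites in the pkg/configs and pkg/cortex hunks above are worth calling out: hand-rolled membership loops (and the old `util.StringsContain` helper) become `slices.Contains` (Go 1.21), and `strings.Split` followed by a range loop becomes the iterator-returning `strings.SplitSeq` (Go 1.24), which avoids building the intermediate slice. A small sketch under illustrative names, loosely modelled on the functions touched above:

package main

import (
	"fmt"
	"slices"
	"strings"
)

// isModuleEnabled mirrors the shape of the rewritten config check:
// the manual loop collapses into a single stdlib call.
func isModuleEnabled(targets []string, m string) bool {
	return slices.Contains(targets, m)
}

// firstMediaType shows SplitSeq: substrings are yielded lazily
// instead of being collected into a []string first.
func firstMediaType(header string) string {
	for part := range strings.SplitSeq(header, ",") {
		if p := strings.TrimSpace(part); p != "" {
			return p
		}
	}
	return ""
}

func main() {
	fmt.Println(isModuleEnabled([]string{"ingester", "querier"}, "querier"))
	fmt.Println(firstMediaType("application/json, text/plain"))
}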
diff --git a/pkg/cortex/runtime_config.go b/pkg/cortex/runtime_config.go index c2bcc786d91..5f71746c2a2 100644 --- a/pkg/cortex/runtime_config.go +++ b/pkg/cortex/runtime_config.go @@ -64,7 +64,7 @@ type runtimeConfigLoader struct { cfg Config } -func (l runtimeConfigLoader) load(r io.Reader) (interface{}, error) { +func (l runtimeConfigLoader) load(r io.Reader) (any, error) { var overrides = &RuntimeConfigValues{} decoder := yaml.NewDecoder(r) @@ -145,7 +145,7 @@ func runtimeConfigHandler(runtimeCfgManager *runtimeconfig.Manager, defaultLimit return } - var output interface{} + var output any switch r.URL.Query().Get("mode") { case "diff": // Default runtime config is just empty struct, but to make diff work, diff --git a/pkg/cortex/tracing.go b/pkg/cortex/tracing.go index 1cdfa6a8190..15839f95b91 100644 --- a/pkg/cortex/tracing.go +++ b/pkg/cortex/tracing.go @@ -11,14 +11,14 @@ import ( // ThanosTracerUnaryInterceptor injects the opentracing global tracer into the context // in order to get it picked up by Thanos components. -func ThanosTracerUnaryInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { +func ThanosTracerUnaryInterceptor(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { ctx = objstoretracing.ContextWithTracer(ctx, opentracing.GlobalTracer()) return handler(tracing.ContextWithTracer(ctx, opentracing.GlobalTracer()), req) } // ThanosTracerStreamInterceptor injects the opentracing global tracer into the context // in order to get it picked up by Thanos components. -func ThanosTracerStreamInterceptor(srv interface{}, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { +func ThanosTracerStreamInterceptor(srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { ctx := objstoretracing.ContextWithTracer(ss.Context(), opentracing.GlobalTracer()) return handler(srv, wrappedServerStream{ ctx: tracing.ContextWithTracer(ctx, opentracing.GlobalTracer()), diff --git a/pkg/cortexpb/compat.go b/pkg/cortexpb/compat.go index 83bdbff33d1..db96d5fac84 100644 --- a/pkg/cortexpb/compat.go +++ b/pkg/cortexpb/compat.go @@ -45,7 +45,7 @@ func ToWriteRequest(lbls []labels.Labels, samples []Sample, metadata []*MetricMe } func (w *WriteRequest) AddHistogramTimeSeries(lbls []labels.Labels, histograms []Histogram) { - for i := 0; i < len(lbls); i++ { + for i := range lbls { ts := TimeseriesFromPool() ts.Labels = append(ts.Labels, FromLabelsToLabelAdapters(lbls[i])...) ts.Histograms = append(ts.Histograms, histograms[i]) @@ -213,7 +213,7 @@ func (s Sample) MarshalJSON() ([]byte, error) { if err != nil { return nil, err } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil + return fmt.Appendf(nil, "[%s,%s]", t, v), nil } // UnmarshalJSON implements json.Unmarshaler. 
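The `Sample.MarshalJSON` change above replaces `[]byte(fmt.Sprintf(...))` with `fmt.Appendf(nil, ...)` (Go 1.19), which formats straight into a byte slice and skips the intermediate string allocation. A tiny sketch with made-up values:

package main

import "fmt"

func main() {
	t, v := "1536673680.000", "\"137\""
	// Old: []byte(fmt.Sprintf("[%s,%s]", t, v)) — formats to a string, then copies it.
	// New: append-style formatting directly into a (here nil, i.e. fresh) byte slice.
	b := fmt.Appendf(nil, "[%s,%s]", t, v)
	fmt.Println(string(b))
}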
diff --git a/pkg/cortexpb/compat_test.go b/pkg/cortexpb/compat_test.go index 843aa290d07..336c8ce3fb0 100644 --- a/pkg/cortexpb/compat_test.go +++ b/pkg/cortexpb/compat_test.go @@ -23,7 +23,7 @@ func TestStdlibJsonMarshalForSample(t *testing.T) { testMarshalling(t, json.Marshal, "json: error calling MarshalJSON for type cortexpb.Sample: test sample") } -func testMarshalling(t *testing.T, marshalFn func(v interface{}) ([]byte, error), expectedError string) { +func testMarshalling(t *testing.T, marshalFn func(v any) ([]byte, error), expectedError string) { isTesting = true defer func() { isTesting = false }() @@ -51,7 +51,7 @@ func TestStdlibJsonUnmarshalForSample(t *testing.T) { testUnmarshalling(t, json.Unmarshal, "test sample") } -func testUnmarshalling(t *testing.T, unmarshalFn func(data []byte, v interface{}) error, expectedError string) { +func testUnmarshalling(t *testing.T, unmarshalFn func(data []byte, v any) error, expectedError string) { isTesting = true defer func() { isTesting = false }() @@ -134,7 +134,7 @@ func BenchmarkFromLabelAdaptersToLabelsWithCopy(b *testing.B) { {Name: "some label", Value: "and its value"}, {Name: "long long long long long label name", Value: "perhaps even longer label value, but who's counting anyway?"}} - for i := 0; i < b.N; i++ { + for b.Loop() { FromLabelAdaptersToLabelsWithCopy(input) } } diff --git a/pkg/cortexpb/extensions.go b/pkg/cortexpb/extensions.go index 716fafcc79d..e75b45e2ae9 100644 --- a/pkg/cortexpb/extensions.go +++ b/pkg/cortexpb/extensions.go @@ -15,7 +15,7 @@ const maxBufferSize = 1024 const signVersion = "v1" var signerPool = sync.Pool{ - New: func() interface{} { + New: func() any { return newSigner() }, } diff --git a/pkg/cortexpb/extensions_test.go b/pkg/cortexpb/extensions_test.go index 94a5f76d48d..158d67e9291 100644 --- a/pkg/cortexpb/extensions_test.go +++ b/pkg/cortexpb/extensions_test.go @@ -26,9 +26,8 @@ func BenchmarkSignRequest(b *testing.B) { for _, tc := range tests { b.Run(fmt.Sprintf("WriteRequestSize: %v", tc.size), func(b *testing.B) { wr := createWriteRequest(tc.size, true, "family1", "help1", "unit") - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { _, err := wr.Sign(ctx) require.NoError(b, err) } @@ -72,7 +71,7 @@ func TestWriteRequest_Sign(t *testing.T) { itNumber := 1000 wg := sync.WaitGroup{} wg.Add(itNumber) - for i := 0; i < itNumber; i++ { + for range itNumber { go func() { defer wg.Done() s, err := tc.w.Sign(ctx) @@ -96,7 +95,7 @@ func createWriteRequest(numTs int, exemplar bool, family string, help string, un }, } - for i := 0; i < numTs; i++ { + for i := range numTs { w.Timeseries = append(w.Timeseries, PreallocTimeseries{ TimeSeries: &TimeSeries{ Labels: []LabelAdapter{ diff --git a/pkg/cortexpb/histograms.go b/pkg/cortexpb/histograms.go index d05dbaa7727..aa13f276088 100644 --- a/pkg/cortexpb/histograms.go +++ b/pkg/cortexpb/histograms.go @@ -156,7 +156,7 @@ func FloatHistogramToHistogramProto(timestamp int64, fh *histogram.FloatHistogra func spansProtoToSpans(s []BucketSpan) []histogram.Span { spans := make([]histogram.Span, len(s)) - for i := 0; i < len(s); i++ { + for i := range s { spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length} } @@ -165,7 +165,7 @@ func spansProtoToSpans(s []BucketSpan) []histogram.Span { func spansToSpansProto(s []histogram.Span) []BucketSpan { spans := make([]BucketSpan, len(s)) - for i := 0; i < len(s); i++ { + for i := range s { spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length} } @@ -174,7 +174,7 @@ func 
spansToSpansProto(s []histogram.Span) []BucketSpan { func spansPromProtoToSpansProto(s []prompb.BucketSpan) []BucketSpan { spans := make([]BucketSpan, len(s)) - for i := 0; i < len(s); i++ { + for i := range s { spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length} } @@ -183,7 +183,7 @@ func spansPromProtoToSpansProto(s []prompb.BucketSpan) []BucketSpan { func spansWriteV2ProtoToSpansProto(s []writev2.BucketSpan) []BucketSpan { spans := make([]BucketSpan, len(s)) - for i := 0; i < len(s); i++ { + for i := range s { spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length} } diff --git a/pkg/cortexpb/slicesPool.go b/pkg/cortexpb/slicesPool.go index e28d51d4f23..c0f3a2c7c3b 100644 --- a/pkg/cortexpb/slicesPool.go +++ b/pkg/cortexpb/slicesPool.go @@ -21,10 +21,10 @@ func newSlicePool(pools int) *byteSlicePools { func (sp *byteSlicePools) init(pools int) { sp.pools = make([]sync.Pool, pools) - for i := 0; i < pools; i++ { + for i := range pools { size := int(math.Pow(2, float64(i+minPoolSizePower))) sp.pools[i] = sync.Pool{ - New: func() interface{} { + New: func() any { buf := make([]byte, 0, size) return &buf }, diff --git a/pkg/cortexpb/slicesPool_test.go b/pkg/cortexpb/slicesPool_test.go index 9bc56cdec3f..d5f3f0a1c61 100644 --- a/pkg/cortexpb/slicesPool_test.go +++ b/pkg/cortexpb/slicesPool_test.go @@ -12,7 +12,7 @@ func TestFuzzyByteSlicePools(t *testing.T) { sut := newSlicePool(20) maxByteSize := int(math.Pow(2, 20+minPoolSizePower-1)) - for i := 0; i < 1000; i++ { + for range 1000 { size := rand.Int() % maxByteSize s := sut.getSlice(size) assert.Equal(t, len(*s), size) diff --git a/pkg/cortexpb/timeseries.go b/pkg/cortexpb/timeseries.go index db7354ffe45..4d780bba6a1 100644 --- a/pkg/cortexpb/timeseries.go +++ b/pkg/cortexpb/timeseries.go @@ -24,13 +24,13 @@ var ( is re-used. But since the slices are far far larger, we come out ahead. 
*/ slicePool = sync.Pool{ - New: func() interface{} { + New: func() any { return make([]PreallocTimeseries, 0, expectedTimeseries) }, } timeSeriesPool = sync.Pool{ - New: func() interface{} { + New: func() any { return &TimeSeries{ Labels: make([]LabelAdapter, 0, expectedLabels), Samples: make([]Sample, 0, expectedSamplesPerSeries), @@ -41,7 +41,7 @@ var ( } writeRequestPool = sync.Pool{ - New: func() interface{} { + New: func() any { return &PreallocWriteRequest{ WriteRequest: WriteRequest{}, } diff --git a/pkg/cortexpb/timeseries_test.go b/pkg/cortexpb/timeseries_test.go index abba35a88db..6194b7e9943 100644 --- a/pkg/cortexpb/timeseries_test.go +++ b/pkg/cortexpb/timeseries_test.go @@ -70,7 +70,7 @@ func TestTimeseriesFromPool(t *testing.T) { func BenchmarkMarshallWriteRequest(b *testing.B) { ts := PreallocTimeseriesSliceFromPool() - for i := 0; i < 100; i++ { + for i := range 100 { ts = append(ts, PreallocTimeseries{TimeSeries: TimeseriesFromPool()}) ts[i].Labels = []LabelAdapter{ {Name: "foo", Value: "bar"}, @@ -85,14 +85,14 @@ func BenchmarkMarshallWriteRequest(b *testing.B) { tests := []struct { name string writeRequestFactory func() proto.Marshaler - clean func(in interface{}) + clean func(in any) }{ { name: "no-pool", writeRequestFactory: func() proto.Marshaler { return &WriteRequest{Timeseries: ts} }, - clean: func(in interface{}) {}, + clean: func(in any) {}, }, { name: "byte pool", @@ -101,7 +101,7 @@ func BenchmarkMarshallWriteRequest(b *testing.B) { w.Timeseries = ts return w }, - clean: func(in interface{}) { + clean: func(in any) { ReuseWriteRequest(in.(*PreallocWriteRequest)) }, }, @@ -112,7 +112,7 @@ func BenchmarkMarshallWriteRequest(b *testing.B) { w.Timeseries = ts return w }, - clean: func(in interface{}) { + clean: func(in any) { ReuseWriteRequest(in.(*PreallocWriteRequest)) }, }, @@ -120,7 +120,7 @@ func BenchmarkMarshallWriteRequest(b *testing.B) { for _, tc := range tests { b.Run(tc.name, func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { w := tc.writeRequestFactory() _, err := w.Marshal() require.NoError(b, err) diff --git a/pkg/distributed_execution/codec_test.go b/pkg/distributed_execution/codec_test.go index 1f294f7bc7f..89fdd30f910 100644 --- a/pkg/distributed_execution/codec_test.go +++ b/pkg/distributed_execution/codec_test.go @@ -62,7 +62,7 @@ func verifyNodeStructure(t *testing.T, expected logicalplan.Node, actual logical require.Equal(t, len(expectedChildren), len(actualChildren)) - for i := 0; i < len(expectedChildren); i++ { + for i := range expectedChildren { if expectedChildren[i] != nil && actualChildren[i] != nil { verifyNodeStructure(t, *expectedChildren[i], *actualChildren[i]) } diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 716a6c06a02..196fdbbfe4a 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "net/http" + "slices" "sort" "strings" "sync" @@ -230,7 +231,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { // Validate config and returns error on failure func (cfg *Config) Validate(limits validation.Limits) error { - if !util.StringsContain(supportedShardingStrategies, cfg.ShardingStrategy) { + if !slices.Contains(supportedShardingStrategies, cfg.ShardingStrategy) { return errInvalidShardingStrategy } @@ -1243,8 +1244,8 @@ func getErrorStatus(err error) string { } // ForReplicationSet runs f, in parallel, for all ingesters in the input replication set. 
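The pool constructors in pkg/cortexpb above change only the signature of their `New` field, from `func() interface{}` to `func() any`. A minimal sketch of the same shape with an illustrative buffer pool (not the patch's pools):

package main

import (
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	// sync.Pool's New field has type func() any; `any` is a pure alias for
	// interface{}, so this is a spelling change with no behavioural effect.
	New: func() any {
		buf := make([]byte, 0, 1024)
		return &buf
	},
}

func main() {
	buf := bufPool.Get().(*[]byte) // type-assert back from any, as before
	*buf = append(*buf, "hello"...)
	fmt.Println(string(*buf))
	*buf = (*buf)[:0]
	bufPool.Put(buf)
}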
-func (d *Distributor) ForReplicationSet(ctx context.Context, replicationSet ring.ReplicationSet, zoneResultsQuorum bool, partialDataEnabled bool, f func(context.Context, ingester_client.IngesterClient) (interface{}, error)) ([]interface{}, error) { - return replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, zoneResultsQuorum, partialDataEnabled, func(ctx context.Context, ing *ring.InstanceDesc) (interface{}, error) { +func (d *Distributor) ForReplicationSet(ctx context.Context, replicationSet ring.ReplicationSet, zoneResultsQuorum bool, partialDataEnabled bool, f func(context.Context, ingester_client.IngesterClient) (any, error)) ([]any, error) { + return replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, zoneResultsQuorum, partialDataEnabled, func(ctx context.Context, ing *ring.InstanceDesc) (any, error) { client, err := d.ingesterPool.GetClientFor(ing.Addr) if err != nil { return nil, err @@ -1254,7 +1255,7 @@ func (d *Distributor) ForReplicationSet(ctx context.Context, replicationSet ring }) } -func (d *Distributor) LabelValuesForLabelNameCommon(ctx context.Context, from, to model.Time, labelName model.LabelName, hints *storage.LabelHints, f func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelValuesRequest, limiter *limiter.QueryLimiter) ([]interface{}, error), matchers ...*labels.Matcher) ([]string, error) { +func (d *Distributor) LabelValuesForLabelNameCommon(ctx context.Context, from, to model.Time, labelName model.LabelName, hints *storage.LabelHints, f func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelValuesRequest, limiter *limiter.QueryLimiter) ([]any, error), matchers ...*labels.Matcher) ([]string, error) { span, ctx := opentracing.StartSpanFromContext(ctx, "Distributor.LabelValues", opentracing.Tags{ "name": labelName, "start": from.Unix(), @@ -1296,8 +1297,8 @@ func (d *Distributor) LabelValuesForLabelNameCommon(ctx context.Context, from, t // LabelValuesForLabelName returns all the label values that are associated with a given label name. func (d *Distributor) LabelValuesForLabelName(ctx context.Context, from, to model.Time, labelName model.LabelName, hint *storage.LabelHints, partialDataEnabled bool, matchers ...*labels.Matcher) ([]string, error) { - return d.LabelValuesForLabelNameCommon(ctx, from, to, labelName, hint, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelValuesRequest, queryLimiter *limiter.QueryLimiter) ([]interface{}, error) { - return d.ForReplicationSet(ctx, rs, d.cfg.ZoneResultsQuorumMetadata, partialDataEnabled, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { + return d.LabelValuesForLabelNameCommon(ctx, from, to, labelName, hint, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelValuesRequest, queryLimiter *limiter.QueryLimiter) ([]any, error) { + return d.ForReplicationSet(ctx, rs, d.cfg.ZoneResultsQuorumMetadata, partialDataEnabled, func(ctx context.Context, client ingester_client.IngesterClient) (any, error) { resp, err := client.LabelValues(ctx, req) if err != nil { return nil, err @@ -1312,8 +1313,8 @@ func (d *Distributor) LabelValuesForLabelName(ctx context.Context, from, to mode // LabelValuesForLabelNameStream returns all the label values that are associated with a given label name. 
func (d *Distributor) LabelValuesForLabelNameStream(ctx context.Context, from, to model.Time, labelName model.LabelName, hint *storage.LabelHints, partialDataEnabled bool, matchers ...*labels.Matcher) ([]string, error) { - return d.LabelValuesForLabelNameCommon(ctx, from, to, labelName, hint, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelValuesRequest, queryLimiter *limiter.QueryLimiter) ([]interface{}, error) { - return d.ForReplicationSet(ctx, rs, d.cfg.ZoneResultsQuorumMetadata, partialDataEnabled, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { + return d.LabelValuesForLabelNameCommon(ctx, from, to, labelName, hint, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelValuesRequest, queryLimiter *limiter.QueryLimiter) ([]any, error) { + return d.ForReplicationSet(ctx, rs, d.cfg.ZoneResultsQuorumMetadata, partialDataEnabled, func(ctx context.Context, client ingester_client.IngesterClient) (any, error) { stream, err := client.LabelValuesStream(ctx, req) if err != nil { return nil, err @@ -1339,7 +1340,7 @@ func (d *Distributor) LabelValuesForLabelNameStream(ctx context.Context, from, t }, matchers...) } -func (d *Distributor) LabelNamesCommon(ctx context.Context, from, to model.Time, hints *storage.LabelHints, f func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelNamesRequest, limiter *limiter.QueryLimiter) ([]interface{}, error), matchers ...*labels.Matcher) ([]string, error) { +func (d *Distributor) LabelNamesCommon(ctx context.Context, from, to model.Time, hints *storage.LabelHints, f func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelNamesRequest, limiter *limiter.QueryLimiter) ([]any, error), matchers ...*labels.Matcher) ([]string, error) { span, ctx := opentracing.StartSpanFromContext(ctx, "Distributor.LabelNames", opentracing.Tags{ "start": from.Unix(), "end": to.Unix(), @@ -1382,8 +1383,8 @@ func (d *Distributor) LabelNamesCommon(ctx context.Context, from, to model.Time, } func (d *Distributor) LabelNamesStream(ctx context.Context, from, to model.Time, hints *storage.LabelHints, partialDataEnabled bool, matchers ...*labels.Matcher) ([]string, error) { - return d.LabelNamesCommon(ctx, from, to, hints, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelNamesRequest, queryLimiter *limiter.QueryLimiter) ([]interface{}, error) { - return d.ForReplicationSet(ctx, rs, d.cfg.ZoneResultsQuorumMetadata, partialDataEnabled, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { + return d.LabelNamesCommon(ctx, from, to, hints, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelNamesRequest, queryLimiter *limiter.QueryLimiter) ([]any, error) { + return d.ForReplicationSet(ctx, rs, d.cfg.ZoneResultsQuorumMetadata, partialDataEnabled, func(ctx context.Context, client ingester_client.IngesterClient) (any, error) { stream, err := client.LabelNamesStream(ctx, req) if err != nil { return nil, err @@ -1411,8 +1412,8 @@ func (d *Distributor) LabelNamesStream(ctx context.Context, from, to model.Time, // LabelNames returns all the label names. 
func (d *Distributor) LabelNames(ctx context.Context, from, to model.Time, hint *storage.LabelHints, partialDataEnabled bool, matchers ...*labels.Matcher) ([]string, error) { - return d.LabelNamesCommon(ctx, from, to, hint, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelNamesRequest, queryLimiter *limiter.QueryLimiter) ([]interface{}, error) { - return d.ForReplicationSet(ctx, rs, d.cfg.ZoneResultsQuorumMetadata, partialDataEnabled, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { + return d.LabelNamesCommon(ctx, from, to, hint, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelNamesRequest, queryLimiter *limiter.QueryLimiter) ([]any, error) { + return d.ForReplicationSet(ctx, rs, d.cfg.ZoneResultsQuorumMetadata, partialDataEnabled, func(ctx context.Context, client ingester_client.IngesterClient) (any, error) { resp, err := client.LabelNames(ctx, req) if err != nil { return nil, err @@ -1429,7 +1430,7 @@ func (d *Distributor) LabelNames(ctx context.Context, from, to model.Time, hint // MetricsForLabelMatchers gets the metrics that match said matchers func (d *Distributor) MetricsForLabelMatchers(ctx context.Context, from, through model.Time, hint *storage.SelectHints, partialDataEnabled bool, matchers ...*labels.Matcher) ([]labels.Labels, error) { return d.metricsForLabelMatchersCommon(ctx, from, through, hint, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.MetricsForLabelMatchersRequest, metrics *map[model.Fingerprint]labels.Labels, mutex *sync.Mutex, queryLimiter *limiter.QueryLimiter) error { - _, err := d.ForReplicationSet(ctx, rs, false, partialDataEnabled, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { + _, err := d.ForReplicationSet(ctx, rs, false, partialDataEnabled, func(ctx context.Context, client ingester_client.IngesterClient) (any, error) { resp, err := client.MetricsForLabelMatchers(ctx, req) if err != nil { return nil, err @@ -1458,7 +1459,7 @@ func (d *Distributor) MetricsForLabelMatchers(ctx context.Context, from, through func (d *Distributor) MetricsForLabelMatchersStream(ctx context.Context, from, through model.Time, hint *storage.SelectHints, partialDataEnabled bool, matchers ...*labels.Matcher) ([]labels.Labels, error) { return d.metricsForLabelMatchersCommon(ctx, from, through, hint, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.MetricsForLabelMatchersRequest, metrics *map[model.Fingerprint]labels.Labels, mutex *sync.Mutex, queryLimiter *limiter.QueryLimiter) error { - _, err := d.ForReplicationSet(ctx, rs, false, partialDataEnabled, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { + _, err := d.ForReplicationSet(ctx, rs, false, partialDataEnabled, func(ctx context.Context, client ingester_client.IngesterClient) (any, error) { stream, err := client.MetricsForLabelMatchersStream(ctx, req) if err != nil { return nil, err @@ -1533,7 +1534,7 @@ func (d *Distributor) MetricsMetadata(ctx context.Context, req *ingester_client. } // TODO(gotjosh): We only need to look in all the ingesters if shardByAllLabels is enabled. 
- resps, err := d.ForReplicationSet(ctx, replicationSet, d.cfg.ZoneResultsQuorumMetadata, false, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { + resps, err := d.ForReplicationSet(ctx, replicationSet, d.cfg.ZoneResultsQuorumMetadata, false, func(ctx context.Context, client ingester_client.IngesterClient) (any, error) { return client.MetricsMetadata(ctx, req) }) if err != nil { @@ -1575,7 +1576,7 @@ func (d *Distributor) UserStats(ctx context.Context) (*ingester.UserStats, error replicationSet.MaxErrors = 0 req := &ingester_client.UserStatsRequest{} - resps, err := d.ForReplicationSet(ctx, replicationSet, false, false, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { + resps, err := d.ForReplicationSet(ctx, replicationSet, false, false, func(ctx context.Context, client ingester_client.IngesterClient) (any, error) { return client.UserStats(ctx, req) }) if err != nil { diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index d24faa3bd43..fd50aef9d1a 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "maps" "math" "math/rand" "net/http" @@ -116,7 +117,7 @@ func TestConfig_Validate(t *testing.T) { } for testName, testData := range tests { - testData := testData // Needed for t.Parallel to work correctly + // Needed for t.Parallel to work correctly t.Run(testName, func(t *testing.T) { t.Parallel() cfg := Config{} @@ -390,7 +391,7 @@ func TestDistributor_Push(t *testing.T) { // yet. To avoid flaky test we retry metrics assertion until we hit the desired state (no error) // within a reasonable timeout. if tc.expectedMetrics != "" { - test.Poll(t, time.Second, nil, func() interface{} { + test.Poll(t, time.Second, nil, func() any { return testutil.GatherAndCompare(regs[0], strings.NewReader(tc.expectedMetrics), tc.metricNames...) 
}) } @@ -527,13 +528,13 @@ func TestDistributor_MetricsCleanup(t *testing.T) { d.cleanupInactiveUser("userA") - err := r.KVClient.CAS(context.Background(), ingester.RingKey, func(in interface{}) (interface{}, bool, error) { + err := r.KVClient.CAS(context.Background(), ingester.RingKey, func(in any) (any, bool, error) { r := in.(*ring.Desc) delete(r.Ingesters, "ingester-0") return in, true, nil }) - test.Poll(t, time.Second, true, func() interface{} { + test.Poll(t, time.Second, true, func() any { ings, _, _ := r.GetAllInstanceDescs(ring.Write) return len(ings) == 1 }) @@ -637,10 +638,8 @@ func TestDistributor_PushIngestionRateLimiter(t *testing.T) { } for testName, testData := range tests { - testData := testData for _, enableHistogram := range []bool{false, true} { - enableHistogram := enableHistogram t.Run(fmt.Sprintf("%s, histogram=%s", testName, strconv.FormatBool(enableHistogram)), func(t *testing.T) { t.Parallel() limits := &validation.Limits{} @@ -805,7 +804,6 @@ func TestDistributor_PushIngestionRateLimiter_Histograms(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() @@ -882,7 +880,7 @@ func TestPush_QuorumError(t *testing.T) { ingesters[1].failResp.Store(httpgrpc.Errorf(500, "InternalServerError")) ingesters[2].failResp.Store(httpgrpc.Errorf(429, "Throttling")) - for i := 0; i < numberOfWrites; i++ { + for range numberOfWrites { request := makeWriteRequest(0, 30, 20, 10) _, err := d.Push(ctx, request) status, ok := status.FromError(err) @@ -895,7 +893,7 @@ func TestPush_QuorumError(t *testing.T) { ingesters[1].failResp.Store(httpgrpc.Errorf(429, "Throttling")) ingesters[2].failResp.Store(httpgrpc.Errorf(500, "InternalServerError")) - for i := 0; i < numberOfWrites; i++ { + for range numberOfWrites { request := makeWriteRequest(0, 300, 200, 10) _, err := d.Push(ctx, request) status, ok := status.FromError(err) @@ -908,7 +906,7 @@ func TestPush_QuorumError(t *testing.T) { ingesters[1].failResp.Store(httpgrpc.Errorf(429, "Throttling")) ingesters[2].happy.Store(true) - for i := 0; i < numberOfWrites; i++ { + for range numberOfWrites { request := makeWriteRequest(0, 30, 20, 10) _, err := d.Push(ctx, request) status, ok := status.FromError(err) @@ -921,7 +919,7 @@ func TestPush_QuorumError(t *testing.T) { ingesters[1].happy.Store(true) ingesters[2].happy.Store(true) - for i := 0; i < 1; i++ { + for range 1 { request := makeWriteRequest(0, 30, 20, 10) _, err := d.Push(ctx, request) require.NoError(t, err) @@ -932,7 +930,7 @@ func TestPush_QuorumError(t *testing.T) { ingesters[1].happy.Store(true) ingesters[2].happy.Store(true) - err := r.KVClient.CAS(context.Background(), ingester.RingKey, func(in interface{}) (interface{}, bool, error) { + err := r.KVClient.CAS(context.Background(), ingester.RingKey, func(in any) (any, bool, error) { r := in.(*ring.Desc) ingester2 := r.Ingesters["ingester-2"] ingester2.State = ring.LEFT @@ -944,12 +942,12 @@ func TestPush_QuorumError(t *testing.T) { require.NoError(t, err) // Give time to the ring get updated with the KV value - test.Poll(t, 15*time.Second, true, func() interface{} { + test.Poll(t, 15*time.Second, true, func() any { replicationSet, _ := r.GetAllHealthy(ring.Read) return len(replicationSet.Instances) == 2 }) - for i := 0; i < numberOfWrites; i++ { + for range numberOfWrites { request := makeWriteRequest(0, 30, 20, 10) _, err := d.Push(ctx, request) require.Error(t, err) @@ -1096,10 +1094,8 @@ func TestDistributor_PushInstanceLimits(t *testing.T) { } for 
testName, testData := range tests { - testData := testData for _, enableHistogram := range []bool{true, false} { - enableHistogram := enableHistogram t.Run(fmt.Sprintf("%s, histogram=%s", testName, strconv.FormatBool(enableHistogram)), func(t *testing.T) { t.Parallel() limits := &validation.Limits{} @@ -1204,7 +1200,6 @@ func TestDistributor_PushHAInstances(t *testing.T) { tc := tc shardByAllLabels := shardByAllLabels for _, enableHistogram := range []bool{true, false} { - enableHistogram := enableHistogram t.Run(fmt.Sprintf("[%d](shardByAllLabels=%v, histogram=%v)", i, shardByAllLabels, enableHistogram), func(t *testing.T) { t.Parallel() var limits validation.Limits @@ -1267,7 +1262,6 @@ func TestDistributor_PushMixedHAInstances(t *testing.T) { tc := tc shardByAllLabels := shardByAllLabels for _, enableHistogram := range []bool{false} { - enableHistogram := enableHistogram t.Run(fmt.Sprintf("[%d](shardByAllLabels=%v, histogram=%v)", i, shardByAllLabels, enableHistogram), func(t *testing.T) { t.Parallel() var limits validation.Limits @@ -1454,7 +1448,7 @@ func TestDistributor_PushQuery(t *testing.T) { }) // And reading each sample individually. - for i := 0; i < 10; i++ { + for i := range 10 { testcases = append(testcases, testcase{ name: fmt.Sprintf("ReadOne(%s, sample=%d)", scenario, i), numIngesters: numIngesters, @@ -1473,7 +1467,6 @@ func TestDistributor_PushQuery(t *testing.T) { } for _, tc := range testcases { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() ds, ingesters, _, _ := prepare(t, prepConfig{ @@ -1559,7 +1552,7 @@ func TestDistributor_QueryStream_ShouldReturnErrorIfMaxChunksPerQueryLimitIsReac // Push more series to exceed the limit once we'll query back all series. writeReq = &cortexpb.WriteRequest{} - for i := 0; i < maxChunksLimit; i++ { + for i := range maxChunksLimit { writeReq.Timeseries = append(writeReq.Timeseries, makeWriteRequestTimeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: fmt.Sprintf("another_series_%d", i)}}, 0, 0, histogram), ) @@ -2039,7 +2032,6 @@ func TestDistributor_Push_ShouldGuaranteeShardingTokenConsistencyOverTheTime(t * limits.AcceptHASamples = true for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() ds, ingesters, _, _ := prepare(t, prepConfig{ @@ -2099,9 +2091,7 @@ func TestDistributor_Push_LabelNameValidation(t *testing.T) { } for testName, tc := range tests { - tc := tc for _, histogram := range []bool{true, false} { - histogram := histogram t.Run(fmt.Sprintf("%s, histogram=%s", testName, strconv.FormatBool(histogram)), func(t *testing.T) { t.Parallel() ds, _, _, _ := prepare(t, prepConfig{ @@ -2167,7 +2157,6 @@ func TestDistributor_Push_ExemplarValidation(t *testing.T) { } for testName, tc := range tests { - tc := tc t.Run(testName, func(t *testing.T) { t.Parallel() ds, _, _, _ := prepare(t, prepConfig{ @@ -2237,9 +2226,8 @@ func BenchmarkDistributor_GetLabelsValues(b *testing.B) { lblValuesDuplicateRatio: tc.lblValuesDuplicateRatio, }) b.Run(name, func(b *testing.B) { - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { _, err := ds[0].LabelValuesForLabelName(ctx, model.Time(time.Now().UnixMilli()), model.Time(time.Now().UnixMilli()), "__name__", nil, false) require.NoError(b, err) } @@ -2265,8 +2253,8 @@ func BenchmarkDistributor_Push(b *testing.B) { samples := make([]cortexpb.Sample, numSeriesPerRequest) lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, "foo")) - for i := 0; i < numSeriesPerRequest; 
i++ { - for i := 0; i < 10; i++ { + for i := range numSeriesPerRequest { + for i := range 10 { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } @@ -2290,9 +2278,9 @@ func BenchmarkDistributor_Push(b *testing.B) { metrics := make([]labels.Labels, numSeriesPerRequest) samples := make([]cortexpb.Sample, numSeriesPerRequest) - for i := 0; i < numSeriesPerRequest; i++ { + for i := range numSeriesPerRequest { lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, "foo")) - for i := 0; i < 10; i++ { + for i := range 10 { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } @@ -2315,7 +2303,7 @@ func BenchmarkDistributor_Push(b *testing.B) { metrics := make([]labels.Labels, numSeriesPerRequest) samples := make([]cortexpb.Sample, numSeriesPerRequest) - for i := 0; i < numSeriesPerRequest; i++ { + for i := range numSeriesPerRequest { lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, "foo")) for i := 1; i < 31; i++ { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) @@ -2340,9 +2328,9 @@ func BenchmarkDistributor_Push(b *testing.B) { metrics := make([]labels.Labels, numSeriesPerRequest) samples := make([]cortexpb.Sample, numSeriesPerRequest) - for i := 0; i < numSeriesPerRequest; i++ { + for i := range numSeriesPerRequest { lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, "foo")) - for i := 0; i < 10; i++ { + for i := range 10 { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } @@ -2368,9 +2356,9 @@ func BenchmarkDistributor_Push(b *testing.B) { metrics := make([]labels.Labels, numSeriesPerRequest) samples := make([]cortexpb.Sample, numSeriesPerRequest) - for i := 0; i < numSeriesPerRequest; i++ { + for i := range numSeriesPerRequest { lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, "foo")) - for i := 0; i < 10; i++ { + for i := range 10 { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } @@ -2396,9 +2384,9 @@ func BenchmarkDistributor_Push(b *testing.B) { metrics := make([]labels.Labels, numSeriesPerRequest) samples := make([]cortexpb.Sample, numSeriesPerRequest) - for i := 0; i < numSeriesPerRequest; i++ { + for i := range numSeriesPerRequest { lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, "foo")) - for i := 0; i < 10; i++ { + for i := range 10 { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } @@ -2425,9 +2413,9 @@ func BenchmarkDistributor_Push(b *testing.B) { metrics := make([]labels.Labels, numSeriesPerRequest) samples := make([]cortexpb.Sample, numSeriesPerRequest) - for i := 0; i < numSeriesPerRequest; i++ { + for i := range numSeriesPerRequest { lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, "foo")) - for i := 0; i < 10; i++ { + for i := range 10 { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } @@ -2450,9 +2438,9 @@ func BenchmarkDistributor_Push(b *testing.B) { metrics := make([]labels.Labels, numSeriesPerRequest) samples := make([]cortexpb.Sample, numSeriesPerRequest) - for i := 0; i < numSeriesPerRequest; i++ { + for i := range numSeriesPerRequest { lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, "foo")) - for i := 0; i < 10; i++ { + for i := range 10 { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } @@ -2479,7 +2467,7 @@ func BenchmarkDistributor_Push(b *testing.B) { b.Cleanup(func() { assert.NoError(b, closer.Close()) }) err := kvStore.CAS(context.Background(), ingester.RingKey, - func(_ interface{}) (interface{}, bool, error) { 
+ func(_ any) (any, bool, error) { d := &ring.Desc{} d.AddIngester("ingester-1", "127.0.0.1", "", tg.GenerateTokens(d, "ingester-1", "", 128, true), ring.ACTIVE, time.Now()) return d, true, nil @@ -2498,7 +2486,7 @@ func BenchmarkDistributor_Push(b *testing.B) { require.NoError(b, services.StopAndAwaitTerminated(context.Background(), ingestersRing)) }) - test.Poll(b, time.Second, 1, func() interface{} { + test.Poll(b, time.Second, 1, func() any { return ingestersRing.InstancesCount() }) @@ -2533,9 +2521,8 @@ func BenchmarkDistributor_Push(b *testing.B) { // Run the benchmark. b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { + for b.Loop() { _, err := distributor.Push(ctx, cortexpb.ToWriteRequest(metrics, samples, nil, nil, cortexpb.API)) if testData.expectedErr == "" && err != nil { b.Fatalf("no error expected but got %v", err) @@ -2599,14 +2586,14 @@ func TestDistributor_MetricsForLabelMatchers_SingleSlowIngester(t *testing.T) { now := model.Now() - for i := 0; i < 100; i++ { + for i := range 100 { req := mockWriteRequest([]labels.Labels{labels.FromStrings(labels.MetricName, "test", "app", "m", "uniq8", strconv.Itoa(i))}, 1, now.Unix(), histogram) _, err := ds[0].Push(ctx, req) require.NoError(t, err) } - for i := 0; i < 50; i++ { + for range 50 { _, err := ds[0].MetricsForLabelMatchers(ctx, now, now, nil, false, mustNewMatcher(labels.MatchEqual, model.MetricNameLabel, "test")) require.NoError(t, err) } @@ -2768,9 +2755,7 @@ func TestDistributor_MetricsForLabelMatchers(t *testing.T) { } for testName, testData := range tests { - testData := testData for _, histogram := range []bool{true, false} { - histogram := histogram t.Run(fmt.Sprintf("%s, histogram=%s", testName, strconv.FormatBool(histogram)), func(t *testing.T) { t.Parallel() now := model.Now() @@ -2849,9 +2834,9 @@ func BenchmarkDistributor_MetricsForLabelMatchers(b *testing.B) { metrics := make([]labels.Labels, numSeriesPerRequest) samples := make([]cortexpb.Sample, numSeriesPerRequest) - for i := 0; i < numSeriesPerRequest; i++ { + for i := range numSeriesPerRequest { lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, fmt.Sprintf("foo_%d", i))) - for i := 0; i < 10; i++ { + for i := range 10 { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } @@ -2897,9 +2882,8 @@ func BenchmarkDistributor_MetricsForLabelMatchers(b *testing.B) { // Run the benchmark. b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { + for b.Loop() { now := model.Now() metrics, err := ds[0].MetricsForLabelMatchers(ctx, now, now, nil, false, testData.matchers...) 
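The benchmark hunks above drop `b.ResetTimer()` and replace `for i := 0; i < b.N; i++` with `for b.Loop()` (Go 1.24), while counted setup loops become `for i := range n` (Go 1.22 range over int). A sketch with a stand-in workload, not the patch's benchmark:

package main_test

import (
	"fmt"
	"testing"
)

func BenchmarkJoinLabels(b *testing.B) {
	// Setup: `range n` iterates 0..n-1 without a manual counter.
	labels := make([]string, 0, 10)
	for i := range 10 {
		labels = append(labels, fmt.Sprintf("name_%d", i))
	}

	b.ReportAllocs()
	// b.Loop() starts timing on its first call, so the explicit
	// b.ResetTimer() that used to follow the setup is no longer needed.
	for b.Loop() {
		s := ""
		for _, l := range labels {
			s += l
		}
		_ = s
	}
}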
@@ -2947,7 +2931,6 @@ func TestDistributor_MetricsMetadata(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() // Create distributor @@ -3092,7 +3075,7 @@ func prepare(tb testing.TB, cfg prepConfig) ([]*Distributor, []*mockIngester, [] tb.Cleanup(func() { assert.NoError(tb, closer.Close()) }) err := kvStore.CAS(context.Background(), ingester.RingKey, - func(_ interface{}) (interface{}, bool, error) { + func(_ any) (any, bool, error) { return &ring.Desc{ Ingesters: ingesterDescs, }, true, nil @@ -3116,7 +3099,7 @@ func prepare(tb testing.TB, cfg prepConfig) ([]*Distributor, []*mockIngester, [] require.NoError(tb, err) require.NoError(tb, services.StartAndAwaitRunning(context.Background(), ingestersRing)) - test.Poll(tb, time.Second, cfg.numIngesters, func() interface{} { + test.Poll(tb, time.Second, cfg.numIngesters, func() any { return ingestersRing.InstancesCount() }) @@ -3184,7 +3167,7 @@ func prepare(tb testing.TB, cfg prepConfig) ([]*Distributor, []*mockIngester, [] // If the distributors ring is setup, wait until the first distributor // updates to the expected size if distributors[0].distributorsRing != nil { - test.Poll(tb, time.Second, cfg.numDistributors, func() interface{} { + test.Poll(tb, time.Second, cfg.numDistributors, func() any { return distributors[0].distributorsLifeCycler.HealthyInstancesCount() }) } @@ -3205,7 +3188,7 @@ func stopAll(ds []*Distributor, r *ring.Ring) { func makeWriteRequest(startTimestampMs int64, samples int, metadata int, histograms int) *cortexpb.WriteRequest { request := &cortexpb.WriteRequest{} - for i := 0; i < samples; i++ { + for i := range samples { request.Timeseries = append(request.Timeseries, makeWriteRequestTimeseries( []cortexpb.LabelAdapter{ {Name: model.MetricNameLabel, Value: "foo"}, @@ -3214,7 +3197,7 @@ func makeWriteRequest(startTimestampMs int64, samples int, metadata int, histogr }, startTimestampMs+int64(i), int64(i), false)) } - for i := 0; i < histograms; i++ { + for i := range histograms { request.Timeseries = append(request.Timeseries, makeWriteRequestTimeseries( []cortexpb.LabelAdapter{ {Name: model.MetricNameLabel, Value: "foo"}, @@ -3223,7 +3206,7 @@ func makeWriteRequest(startTimestampMs int64, samples int, metadata int, histogr }, startTimestampMs+int64(i), int64(i), true)) } - for i := 0; i < metadata; i++ { + for i := range metadata { m := &cortexpb.MetricMetadata{ MetricFamilyName: fmt.Sprintf("metric_%d", i), Type: cortexpb.COUNTER, @@ -3254,7 +3237,7 @@ func makeWriteRequestTimeseries(labels []cortexpb.LabelAdapter, ts, value int64, func makeWriteRequestHA(samples int, replica, cluster string, histogram bool) *cortexpb.WriteRequest { request := &cortexpb.WriteRequest{} - for i := 0; i < samples; i++ { + for i := range samples { ts := cortexpb.PreallocTimeseries{ TimeSeries: &cortexpb.TimeSeries{ Labels: []cortexpb.LabelAdapter{ @@ -3369,7 +3352,7 @@ func makeWriteRequestHAMixedSamples(samples int, histogram bool) *cortexpb.Write } } else { var s = make([]cortexpb.Sample, 0) - for i := 0; i < samples; i++ { + for i := range samples { sample := cortexpb.Sample{ Value: float64(i), TimestampMs: int64(i), @@ -3475,9 +3458,7 @@ func (i *mockIngester) series() map[uint32]*cortexpb.PreallocTimeseries { defer i.Unlock() result := map[uint32]*cortexpb.PreallocTimeseries{} - for k, v := range i.timeseries { - result[k] = v - } + maps.Copy(result, i.timeseries) return result } @@ -3939,7 +3920,6 @@ func TestDistributorValidation(t *testing.T) { err: 
httpgrpc.Errorf(http.StatusBadRequest, `timestamp too new: %d metric: "testmetric"`, future), }, } { - tc := tc t.Run(strconv.Itoa(i), func(t *testing.T) { t.Parallel() var limits validation.Limits @@ -4108,9 +4088,7 @@ func TestDistributor_Push_Relabel(t *testing.T) { } for _, tc := range cases { - tc := tc for _, enableHistogram := range []bool{false, true} { - enableHistogram := enableHistogram t.Run(fmt.Sprintf("%s, histogram=%s", tc.name, strconv.FormatBool(enableHistogram)), func(t *testing.T) { t.Parallel() var err error @@ -4166,7 +4144,6 @@ func TestDistributor_Push_EmptyLabel(t *testing.T) { } for _, tc := range cases { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() var err error @@ -4368,7 +4345,7 @@ func TestDistributor_PushLabelSetMetrics(t *testing.T) { func countMockIngestersCalls(ingesters []*mockIngester, name string) int { count := 0 - for i := 0; i < len(ingesters); i++ { + for i := range ingesters { if ingesters[i].countCalls(name) > 0 { count++ } diff --git a/pkg/distributor/ingestion_rate_strategy_test.go b/pkg/distributor/ingestion_rate_strategy_test.go index 84152bd8248..fd8ea0d362e 100644 --- a/pkg/distributor/ingestion_rate_strategy_test.go +++ b/pkg/distributor/ingestion_rate_strategy_test.go @@ -89,7 +89,6 @@ func TestIngestionRateStrategy(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() diff --git a/pkg/distributor/query.go b/pkg/distributor/query.go index 9645a65672f..3b44d895bc9 100644 --- a/pkg/distributor/query.go +++ b/pkg/distributor/query.go @@ -162,7 +162,7 @@ func mergeExemplarSets(a, b []cortexpb.Exemplar) []cortexpb.Exemplar { func (d *Distributor) queryIngestersExemplars(ctx context.Context, replicationSet ring.ReplicationSet, req *ingester_client.ExemplarQueryRequest) (*ingester_client.ExemplarQueryResponse, error) { // Fetch exemplars from multiple ingesters in parallel, using the replicationSet // to deal with consistency. 
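The deleted `tc := tc` and `testData := testData` lines throughout the test tables above are redundant since Go 1.22, where each `for` iteration gets a fresh loop variable, so parallel subtests no longer need the defensive copy. A minimal sketch (hypothetical table):

package main_test

import "testing"

func TestPerIterationLoopVar(t *testing.T) {
	tests := []struct{ name, in, want string }{
		{"first", "a", "a"},
		{"second", "b", "b"},
	}
	for _, tc := range tests {
		// No `tc := tc` copy: since Go 1.22 each iteration has its own tc,
		// so the closure passed to t.Run captures the right value even with t.Parallel().
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			if tc.in != tc.want {
				t.Fatalf("got %q, want %q", tc.in, tc.want)
			}
		})
	}
}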
- results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, false, false, func(ctx context.Context, ing *ring.InstanceDesc) (interface{}, error) { + results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, false, false, func(ctx context.Context, ing *ring.InstanceDesc) (any, error) { client, err := d.ingesterPool.GetClientFor(ing.Addr) if err != nil { return nil, err @@ -190,7 +190,7 @@ func (d *Distributor) queryIngestersExemplars(ctx context.Context, replicationSe return mergeExemplarQueryResponses(results), nil } -func mergeExemplarQueryResponses(results []interface{}) *ingester_client.ExemplarQueryResponse { +func mergeExemplarQueryResponses(results []any) *ingester_client.ExemplarQueryResponse { var keys []string exemplarResults := make(map[string]cortexpb.TimeSeries) buf := make([]byte, 0, 1024) @@ -229,7 +229,7 @@ func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSet ri ) // Fetch samples from multiple ingesters - results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, false, partialDataEnabled, func(ctx context.Context, ing *ring.InstanceDesc) (interface{}, error) { + results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, false, partialDataEnabled, func(ctx context.Context, ing *ring.InstanceDesc) (any, error) { client, err := d.ingesterPool.GetClientFor(ing.Addr) if err != nil { return nil, err diff --git a/pkg/distributor/query_test.go b/pkg/distributor/query_test.go index 5c9d35073b9..384cd849a71 100644 --- a/pkg/distributor/query_test.go +++ b/pkg/distributor/query_test.go @@ -73,16 +73,15 @@ func TestMergeExemplars(t *testing.T) { {Labels: labels2, Exemplars: []cortexpb.Exemplar{exemplar3, exemplar4}}}, }, } { - c := c t.Run(fmt.Sprint("test", i), func(t *testing.T) { t.Parallel() rA := &ingester_client.ExemplarQueryResponse{Timeseries: c.seriesA} rB := &ingester_client.ExemplarQueryResponse{Timeseries: c.seriesB} - e := mergeExemplarQueryResponses([]interface{}{rA, rB}) + e := mergeExemplarQueryResponses([]any{rA, rB}) require.Equal(t, c.expected, e.Timeseries) if !c.nonReversible { // Check the other way round too - e = mergeExemplarQueryResponses([]interface{}{rB, rA}) + e = mergeExemplarQueryResponses([]any{rB, rA}) require.Equal(t, c.expected, e.Timeseries) } }) diff --git a/pkg/frontend/transport/handler.go b/pkg/frontend/transport/handler.go index edce4839413..9aab95281ec 100644 --- a/pkg/frontend/transport/handler.go +++ b/pkg/frontend/transport/handler.go @@ -7,6 +7,7 @@ import ( "flag" "fmt" "io" + "maps" "net/http" "net/url" "strconv" @@ -352,9 +353,7 @@ func (f *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } - for h, vs := range resp.Header { - hs[h] = vs - } + maps.Copy(hs, resp.Header) w.WriteHeader(resp.StatusCode) // log copy response body error so that we will know even though success response code returned @@ -364,10 +363,10 @@ func (f *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } } -func formatGrafanaStatsFields(r *http.Request) []interface{} { +func formatGrafanaStatsFields(r *http.Request) []any { // NOTE(GiedriusS): see https://github.com/grafana/grafana/pull/60301 for more info. - fields := make([]interface{}, 0, 4) + fields := make([]any, 0, 4) if dashboardUID := r.Header.Get("X-Dashboard-Uid"); dashboardUID != "" { fields = append(fields, "X-Dashboard-Uid", dashboardUID) } @@ -379,7 +378,7 @@ func formatGrafanaStatsFields(r *http.Request) []interface{} { // logQueryRequest logs query request before query execution. 
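In the query-frontend handler above, copying response headers value-by-value becomes a single `maps.Copy` call (Go 1.21). A sketch with hypothetical header maps:

package main

import (
	"fmt"
	"maps"
	"net/http"
)

func main() {
	dst := http.Header{"X-Existing": {"1"}}
	src := http.Header{"Content-Type": {"application/json"}, "X-Trace": {"abc"}}

	// Old form:
	//   for h, vs := range src { dst[h] = vs }
	// maps.Copy performs the same key-by-key assignment (slice values are shared, not cloned).
	maps.Copy(dst, src)

	fmt.Println(dst)
}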
func (f *Handler) logQueryRequest(r *http.Request, queryString url.Values, source string) { - logMessage := []interface{}{ + logMessage := []any{ "msg", "query request", "component", "query-frontend", "method", r.Method, @@ -419,7 +418,7 @@ func (f *Handler) logQueryRequest(r *http.Request, queryString url.Values, sourc // reportSlowQuery reports slow queries. func (f *Handler) reportSlowQuery(r *http.Request, queryString url.Values, queryResponseTime time.Duration) { - logMessage := []interface{}{ + logMessage := []any{ "msg", "slow query detected", "method", r.Method, "host", r.Host, @@ -473,7 +472,7 @@ func (f *Handler) reportQueryStats(r *http.Request, source, userID string, query } // Log stats. - logMessage := append([]interface{}{ + logMessage := append([]any{ "msg", "query stats", "component", "query-frontend", "method", r.Method, @@ -611,12 +610,12 @@ func (f *Handler) parseRequestQueryString(r *http.Request, bodyBuf bytes.Buffer) return r.Form } -func formatQueryString(queryString url.Values) (fields []interface{}) { - var queryFields []interface{} +func formatQueryString(queryString url.Values) (fields []any) { + var queryFields []any for k, v := range queryString { // If `query` or `match[]` field exists, we always put it as the last field. if k == "query" || k == "match[]" { - queryFields = []interface{}{fmt.Sprintf("param_%s", k), strings.Join(v, ",")} + queryFields = []any{fmt.Sprintf("param_%s", k), strings.Join(v, ",")} continue } fields = append(fields, fmt.Sprintf("param_%s", k), strings.Join(v, ",")) diff --git a/pkg/frontend/v1/queue_test.go b/pkg/frontend/v1/queue_test.go index a11cfe15131..35d5f2010ec 100644 --- a/pkg/frontend/v1/queue_test.go +++ b/pkg/frontend/v1/queue_test.go @@ -61,7 +61,7 @@ func TestDequeuesExpiredRequests(t *testing.T) { cancel() good := 0 - for i := 0; i < 10; i++ { + for i := range 10 { var err error if i%5 == 0 { good++ @@ -101,7 +101,7 @@ func TestRoundRobinQueues(t *testing.T) { f, err := setupFrontend(t, requests, config) require.NoError(t, err) - for i := 0; i < requests; i++ { + for i := range requests { userID := fmt.Sprint(i / tenants) ctx := user.InjectOrgID(context.Background(), userID) @@ -167,5 +167,5 @@ func (p *processServerMock) SetHeader(_ metadata.MD) error { return nil } func (p *processServerMock) SendHeader(_ metadata.MD) error { return nil } func (p *processServerMock) SetTrailer(md metadata.MD) {} func (p *processServerMock) Context() context.Context { return p.ctx } -func (p *processServerMock) SendMsg(m interface{}) error { return nil } -func (p *processServerMock) RecvMsg(m interface{}) error { return nil } +func (p *processServerMock) SendMsg(m any) error { return nil } +func (p *processServerMock) RecvMsg(m any) error { return nil } diff --git a/pkg/frontend/v2/frontend_test.go b/pkg/frontend/v2/frontend_test.go index 676070ca0f6..5ba83213d86 100644 --- a/pkg/frontend/v2/frontend_test.go +++ b/pkg/frontend/v2/frontend_test.go @@ -72,7 +72,7 @@ func setupFrontend(t *testing.T, schedulerReplyFunc func(f *Frontend, msg *sched }) // Wait for frontend to connect to scheduler. - test.Poll(t, 1*time.Second, 1, func() interface{} { + test.Poll(t, 1*time.Second, 1, func() any { ms.mu.Lock() defer ms.mu.Unlock() @@ -206,7 +206,7 @@ func TestFrontendCancellation(t *testing.T) { require.Nil(t, resp) // We wait a bit to make sure scheduler receives the cancellation request. 
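The queue test hunks above use Go 1.22's range-over-integer form, `for i := range requests`, which iterates i = 0 .. requests-1 exactly like the classic three-clause loop. A tiny sketch with made-up values:

package main

import "fmt"

func main() {
	const requests = 5

	// Range over an int (Go 1.22+): i takes the values 0, 1, ..., requests-1.
	for i := range requests {
		fmt.Println("request", i)
	}

	// When the index is unused, the loop variable can be dropped entirely.
	for range 3 {
		fmt.Println("tick")
	}
}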
- test.Poll(t, time.Second, 2, func() interface{} { + test.Poll(t, time.Second, 2, func() any { ms.mu.Lock() defer ms.mu.Unlock() diff --git a/pkg/ha/ha_tracker.go b/pkg/ha/ha_tracker.go index cc0ae8d8f65..6ff0dc42673 100644 --- a/pkg/ha/ha_tracker.go +++ b/pkg/ha/ha_tracker.go @@ -6,6 +6,7 @@ import ( "flag" "fmt" "math/rand" + "slices" "strings" "sync" "time" @@ -109,10 +110,8 @@ func (cfg *HATrackerConfig) Validate() error { // Tracker kv store only supports consul and etcd. storeAllowedList := []string{"consul", "etcd"} - for _, as := range storeAllowedList { - if cfg.KVStore.Store == as { - return nil - } + if slices.Contains(storeAllowedList, cfg.KVStore.Store) { + return nil } return fmt.Errorf("invalid HATracker KV store type: %s", cfg.KVStore.Store) } @@ -260,7 +259,7 @@ func (c *HATracker) loop(ctx context.Context) error { // The KVStore config we gave when creating c should have contained a prefix, // which would have given us a prefixed KVStore client. So, we can pass empty string here. - c.client.WatchPrefix(ctx, "", func(key string, value interface{}) bool { + c.client.WatchPrefix(ctx, "", func(key string, value any) bool { replica := value.(*ReplicaDesc) user, cluster, keyHasSeparator := strings.Cut(key, "/") @@ -383,7 +382,7 @@ func (c *HATracker) cleanupOldReplicas(ctx context.Context, deadline time.Time) // Not marked as deleted yet. if desc.DeletedAt == 0 && timestamp.Time(desc.ReceivedAt).Before(deadline) { - err := c.client.CAS(ctx, key, func(in interface{}) (out interface{}, retry bool, err error) { + err := c.client.CAS(ctx, key, func(in any) (out any, retry bool, err error) { d, ok := in.(*ReplicaDesc) if !ok || d == nil || d.DeletedAt > 0 || !timestamp.Time(desc.ReceivedAt).Before(deadline) { return nil, false, nil @@ -452,7 +451,7 @@ func (c *HATracker) CheckReplica(ctx context.Context, userID, replicaGroup, repl } func (c *HATracker) checkKVStore(ctx context.Context, key, replica string, now time.Time) error { - return c.client.CAS(ctx, key, func(in interface{}) (out interface{}, retry bool, err error) { + return c.client.CAS(ctx, key, func(in any) (out any, retry bool, err error) { if desc, ok := in.(*ReplicaDesc); ok && desc.DeletedAt == 0 { // We don't need to CAS and update the timestamp in the KV store if the timestamp we've received // this sample at is less than updateTimeout amount of time since the timestamp in the KV store. 
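The HATracker validation hunk above replaces a manual membership loop over the KV store allow-list with `slices.Contains` (Go 1.21+). A minimal sketch of the same check; the function name is hypothetical, only the allow-list values mirror the hunk:

package main

import (
	"fmt"
	"slices"
)

// validateStore reports whether store is one of the allowed KV backends.
// slices.Contains(list, v) returns true if any element of list equals v,
// replacing the removed for/if loop.
func validateStore(store string) error {
	allowed := []string{"consul", "etcd"}
	if slices.Contains(allowed, store) {
		return nil
	}
	return fmt.Errorf("invalid KV store type: %s", store)
}

func main() {
	fmt.Println(validateStore("etcd"))   // <nil>
	fmt.Println(validateStore("memory")) // invalid KV store type: memory
}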
diff --git a/pkg/ha/ha_tracker_test.go b/pkg/ha/ha_tracker_test.go index 563d7907938..3d576082aae 100644 --- a/pkg/ha/ha_tracker_test.go +++ b/pkg/ha/ha_tracker_test.go @@ -39,7 +39,7 @@ func checkReplicaTimestamp(t *testing.T, duration time.Duration, c *HATracker, u // to match "received at" precision expected = expected.Truncate(time.Millisecond) - test.Poll(t, duration, nil, func() interface{} { + test.Poll(t, duration, nil, func() any { c.electedLock.RLock() r := c.elected[key] c.electedLock.RUnlock() @@ -120,7 +120,6 @@ func TestHATrackerConfig_Validate(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() assert.Equal(t, testData.expectedErr, testData.cfg.Validate()) @@ -455,7 +454,6 @@ func TestCheckReplicaUpdateTimeoutJitter(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() // Init HA tracker @@ -573,7 +571,7 @@ func TestHAClustersLimit(t *testing.T) { func waitForClustersUpdate(t *testing.T, expected int, tr *HATracker, userID string) { t.Helper() - test.Poll(t, 2*time.Second, expected, func() interface{} { + test.Poll(t, 2*time.Second, expected, func() any { tr.electedLock.RLock() defer tr.electedLock.RUnlock() @@ -762,7 +760,7 @@ func TestCheckReplicaCleanup(t *testing.T) { func checkUserReplicaGroups(t *testing.T, duration time.Duration, c *HATracker, user string, expectedReplicaGroups int) { t.Helper() - test.Poll(t, duration, nil, func() interface{} { + test.Poll(t, duration, nil, func() any { c.electedLock.RLock() cl := len(c.replicaGroups[user]) c.electedLock.RUnlock() @@ -778,7 +776,7 @@ func checkUserReplicaGroups(t *testing.T, duration time.Duration, c *HATracker, func checkReplicaDeletionState(t *testing.T, duration time.Duration, c *HATracker, user, replicaGroup string, expectedExistsInMemory, expectedExistsInKV, expectedMarkedForDeletion bool) { key := fmt.Sprintf("%s/%s", user, replicaGroup) - test.Poll(t, duration, nil, func() interface{} { + test.Poll(t, duration, nil, func() any { c.electedLock.RLock() _, exists := c.elected[key] c.electedLock.RUnlock() diff --git a/pkg/ingester/active_series.go b/pkg/ingester/active_series.go index 1c3bf4c6d86..57134a03ca6 100644 --- a/pkg/ingester/active_series.go +++ b/pkg/ingester/active_series.go @@ -42,7 +42,7 @@ func NewActiveSeries() *ActiveSeries { c := &ActiveSeries{} // Stripes are pre-allocated so that we only read on them and no lock is required. - for i := 0; i < numActiveSeriesStripes; i++ { + for i := range numActiveSeriesStripes { c.stripes[i].refs = map[uint64][]activeSeriesEntry{} } @@ -59,21 +59,21 @@ func (c *ActiveSeries) UpdateSeries(series labels.Labels, hash uint64, now time. // Purge removes expired entries from the cache. This function should be called // periodically to avoid memory leaks. func (c *ActiveSeries) Purge(keepUntil time.Time) { - for s := 0; s < numActiveSeriesStripes; s++ { + for s := range numActiveSeriesStripes { c.stripes[s].purge(keepUntil) } } // nolint // Linter reports that this method is unused, but it is. 
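Most of the test changes above, and throughout this diff, rewrite `interface{}` as `any`. Since Go 1.18, `any` is a type alias for `interface{}`, so the rewrite is purely cosmetic and changes no behavior. A minimal illustration, with invented names:

package main

import "fmt"

// describe accepts a value of any type; because `any` is an alias for
// `interface{}`, this signature is identical to describe(v interface{}).
func describe(v any) string {
	return fmt.Sprintf("%T: %v", v, v)
}

func main() {
	var old interface{} = 42
	var alias any = old // assignable both ways: they are the same type
	fmt.Println(describe(alias))
	fmt.Println(describe("hello"))
}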
func (c *ActiveSeries) clear() { - for s := 0; s < numActiveSeriesStripes; s++ { + for s := range numActiveSeriesStripes { c.stripes[s].clear() } } func (c *ActiveSeries) Active() int { total := 0 - for s := 0; s < numActiveSeriesStripes; s++ { + for s := range numActiveSeriesStripes { total += c.stripes[s].getActive() } return total @@ -81,7 +81,7 @@ func (c *ActiveSeries) Active() int { func (c *ActiveSeries) ActiveNativeHistogram() int { total := 0 - for s := 0; s < numActiveSeriesStripes; s++ { + for s := range numActiveSeriesStripes { total += c.stripes[s].getActiveNativeHistogram() } return total diff --git a/pkg/ingester/active_series_test.go b/pkg/ingester/active_series_test.go index fe7840f2576..49a24c89361 100644 --- a/pkg/ingester/active_series_test.go +++ b/pkg/ingester/active_series_test.go @@ -52,10 +52,10 @@ func TestActiveSeries_Purge(t *testing.T) { } // Run the same test for increasing TTL values - for ttl := 0; ttl < len(series); ttl++ { + for ttl := range series { c := NewActiveSeries() - for i := 0; i < len(series); i++ { + for i := range series { c.UpdateSeries(fromLabelToLabels(series[i]), fromLabelToLabels(series[i]).Hash(), time.Unix(int64(i), 0), true, copyFn) } @@ -117,7 +117,7 @@ func benchmarkActiveSeriesConcurrencySingleSeries(b *testing.B, goroutines int) start := make(chan struct{}) max := int(math.Ceil(float64(b.N) / float64(goroutines))) labelhash := series.Hash() - for i := 0; i < goroutines; i++ { + for range goroutines { wg.Add(1) go func() { defer wg.Done() @@ -125,7 +125,7 @@ func benchmarkActiveSeriesConcurrencySingleSeries(b *testing.B, goroutines int) now := time.Now() - for ix := 0; ix < max; ix++ { + for ix := range max { now = now.Add(time.Duration(ix) * time.Millisecond) c.UpdateSeries(series, labelhash, now, false, copyFn) } @@ -142,22 +142,21 @@ func BenchmarkActiveSeries_UpdateSeries(b *testing.B) { // Prepare series nameBuf := bytes.Buffer{} - for i := 0; i < 50; i++ { + for range 50 { nameBuf.WriteString("abcdefghijklmnopqrstuvzyx") } name := nameBuf.String() series := make([]labels.Labels, b.N) labelhash := make([]uint64, b.N) - for s := 0; s < b.N; s++ { + for s := 0; b.Loop(); s++ { series[s] = labels.FromStrings(name, name+strconv.Itoa(s)) labelhash[s] = series[s].Hash() } now := time.Now().UnixNano() - b.ResetTimer() - for ix := 0; ix < b.N; ix++ { + for ix := 0; b.Loop(); ix++ { c.UpdateSeries(series[ix], labelhash[ix], time.Unix(0, now+int64(ix)), false, copyFn) } } @@ -179,12 +178,12 @@ func benchmarkPurge(b *testing.B, twice bool) { series := [numSeries]labels.Labels{} labelhash := [numSeries]uint64{} - for s := 0; s < numSeries; s++ { + for s := range numSeries { series[s] = labels.FromStrings("a", strconv.Itoa(s)) labelhash[s] = series[s].Hash() } - for i := 0; i < b.N; i++ { + for b.Loop() { b.StopTimer() // Prepare series diff --git a/pkg/ingester/client/client.go b/pkg/ingester/client/client.go index b52ac69634b..ed8bacd45aa 100644 --- a/pkg/ingester/client/client.go +++ b/pkg/ingester/client/client.go @@ -205,7 +205,7 @@ func (c *closableHealthAndIngesterClient) Run(streamPushChan chan *streamWriteJo var workerErr error var wg sync.WaitGroup - for i := 0; i < INGESTER_CLIENT_STREAM_WORKER_COUNT; i++ { + for i := range INGESTER_CLIENT_STREAM_WORKER_COUNT { workerName := fmt.Sprintf("ingester-%s-stream-push-worker-%d", c.addr, i) wg.Add(1) go func() { diff --git a/pkg/ingester/client/client_test.go b/pkg/ingester/client/client_test.go index da41b03636c..02edc8d070d 100644 --- a/pkg/ingester/client/client_test.go +++ 
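Several benchmarks above move from the `for n := 0; n < b.N; n++` plus `b.ResetTimer()` pattern to `testing.B.Loop` (Go 1.24), which drives the iteration count itself and excludes setup done before the loop from the timed region. A minimal sketch; the benchmarked work here is invented:

package example

import (
	"strings"
	"testing"
)

func BenchmarkJoin(b *testing.B) {
	// Setup performed before the first call to b.Loop is not measured,
	// so an explicit b.ResetTimer call is no longer needed.
	parts := []string{"a", "b", "c", "d"}

	// b.Loop returns true for as long as the benchmark should keep
	// iterating, replacing the classic `for n := 0; n < b.N; n++` form.
	for b.Loop() {
		_ = strings.Join(parts, ",")
	}
}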
b/pkg/ingester/client/client_test.go @@ -22,7 +22,7 @@ func TestMarshall(t *testing.T) { recorder := httptest.NewRecorder() { req := cortexpb.WriteRequest{} - for i := 0; i < numSeries; i++ { + for i := range numSeries { req.Timeseries = append(req.Timeseries, cortexpb.PreallocTimeseries{ TimeSeries: &cortexpb.TimeSeries{ Labels: []cortexpb.LabelAdapter{ diff --git a/pkg/ingester/client/compat_test.go b/pkg/ingester/client/compat_test.go index 9914af6d066..8c90d58560c 100644 --- a/pkg/ingester/client/compat_test.go +++ b/pkg/ingester/client/compat_test.go @@ -63,7 +63,7 @@ func matchersEqual(expected, actual []*labels.Matcher) bool { return false } - for i := 0; i < len(expected); i++ { + for i := range expected { a := actual[i] e := expected[i] if a.Name != e.Name || a.Value != e.Value || a.Type != e.Type { @@ -85,8 +85,8 @@ func benchmarkSeriesMap(numSeries int, b *testing.B) { sm := make(map[string]int, numSeries) b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { + + for b.Loop() { for i, s := range series { sm[LabelsToKeyString(s)] = i } @@ -106,7 +106,7 @@ func benchmarkSeriesMap(numSeries int, b *testing.B) { func makeSeries(n int) []labels.Labels { series := make([]labels.Labels, 0, n) - for i := 0; i < n; i++ { + for i := range n { series = append(series, labels.FromMap(map[string]string{ "label0": "value0", "label1": "value1", diff --git a/pkg/ingester/client/cortex_util.go b/pkg/ingester/client/cortex_util.go index b3ba0e2d2be..5d463d49a7a 100644 --- a/pkg/ingester/client/cortex_util.go +++ b/pkg/ingester/client/cortex_util.go @@ -32,10 +32,7 @@ func SendLabelNamesStream(s Ingester_LabelNamesStreamServer, l *LabelNamesStream func SendAsBatchToStream(totalItems int, streamBatchSize int, fn func(start, end int) error) error { for i := 0; i < totalItems; i += streamBatchSize { - j := i + streamBatchSize - if j > totalItems { - j = totalItems - } + j := min(i+streamBatchSize, totalItems) if err := fn(i, j); err != nil { return err } diff --git a/pkg/ingester/client/cortex_util_test.go b/pkg/ingester/client/cortex_util_test.go index 3058026ebe9..3f1e02ddbca 100644 --- a/pkg/ingester/client/cortex_util_test.go +++ b/pkg/ingester/client/cortex_util_test.go @@ -117,7 +117,7 @@ func TestStreamingSends(t *testing.T) { clientCancel() // Wait until the cancelling has been propagated to the server. 
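The `SendAsBatchToStream` hunk above replaces the clamp-with-if idiom by the built-in `min` (Go 1.21+) when computing the end index of each batch. A small sketch of the same arithmetic with hypothetical sizes:

package main

import "fmt"

func main() {
	const totalItems = 10
	const batchSize = 4

	// min(a, b) is a builtin since Go 1.21; it replaces the removed
	//   j := i + batchSize
	//   if j > totalItems { j = totalItems }
	// clamp when computing the half-open end of each batch.
	for i := 0; i < totalItems; i += batchSize {
		j := min(i+batchSize, totalItems)
		fmt.Printf("batch [%d, %d)\n", i, j)
	}
}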
- test.Poll(t, time.Second, context.Canceled, func() interface{} { + test.Poll(t, time.Second, context.Canceled, func() any { return stream.Context().Err() }) diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index c2dab4a54ec..62ec768ce2b 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -213,7 +213,7 @@ func (cfg *Config) getIgnoreSeriesLimitForMetricNamesMap() map[string]struct{} { result := map[string]struct{}{} - for _, s := range strings.Split(cfg.IgnoreSeriesLimitForMetricNames, ",") { + for s := range strings.SplitSeq(cfg.IgnoreSeriesLimitForMetricNames, ",") { tr := strings.TrimSpace(s) if tr != "" { result[tr] = struct{}{} @@ -1745,10 +1745,7 @@ func (i *Ingester) LabelValuesStream(req *client.LabelValuesRequest, stream clie } for i := 0; i < len(resp.LabelValues); i += metadataStreamBatchSize { - j := i + metadataStreamBatchSize - if j > len(resp.LabelValues) { - j = len(resp.LabelValues) - } + j := min(i+metadataStreamBatchSize, len(resp.LabelValues)) resp := &client.LabelValuesStreamResponse{ LabelValues: resp.LabelValues[i:j], } @@ -1842,10 +1839,7 @@ func (i *Ingester) LabelNamesStream(req *client.LabelNamesRequest, stream client } for i := 0; i < len(resp.LabelNames); i += metadataStreamBatchSize { - j := i + metadataStreamBatchSize - if j > len(resp.LabelNames) { - j = len(resp.LabelNames) - } + j := min(i+metadataStreamBatchSize, len(resp.LabelNames)) resp := &client.LabelNamesStreamResponse{ LabelNames: resp.LabelNames[i:j], } @@ -2612,7 +2606,6 @@ func (i *Ingester) closeAllTSDB() { // Concurrently close all users TSDB for userID, userDB := range i.TSDBState.dbs { - userID := userID go func(db *userTSDB) { defer wg.Done() diff --git a/pkg/ingester/ingester_no_race_test.go b/pkg/ingester/ingester_no_race_test.go index 656a7ab28c4..f6b7a28d279 100644 --- a/pkg/ingester/ingester_no_race_test.go +++ b/pkg/ingester/ingester_no_race_test.go @@ -38,7 +38,7 @@ func TestExpandedCachePostings_Race(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until the ingester is ACTIVE - test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} { + test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -48,10 +48,10 @@ func TestExpandedCachePostings_Race(t *testing.T) { labelNames := 100 seriesPerLabelName := 200 - for j := 0; j < labelNames; j++ { + for j := range labelNames { metricName := fmt.Sprintf("test_metric_%d", j) wg.Add(seriesPerLabelName * 2) - for k := 0; k < seriesPerLabelName; k++ { + for k := range seriesPerLabelName { go func() { defer wg.Done() _, err := i.Push(ctx, cortexpb.ToWriteRequest( diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index c59879a1d84..54931e81e28 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -145,15 +145,15 @@ func TestMatcherCache(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, time.Second, ring.ACTIVE, func() any { return ing.lifecycler.GetState() }) ctx := user.InjectOrgID(context.Background(), userID) // Lets have 1 key evicted numberOfDifferentMatchers := cfg.MatchersCacheMaxItems + 1 callPerMatcher := 10 - for j := 0; j < numberOfDifferentMatchers; j++ { - for i := 0; i < callPerMatcher; i++ { + for j := range numberOfDifferentMatchers { + for range callPerMatcher { s := 
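The `getIgnoreSeriesLimitForMetricNamesMap` hunk above ranges over `strings.SplitSeq` (Go 1.24), which yields the substrings as an iterator instead of allocating the intermediate slice that `strings.Split` returns. A minimal sketch with an invented input string:

package main

import (
	"fmt"
	"strings"
)

func main() {
	input := "metric_a, metric_b, ,metric_c"
	result := map[string]struct{}{}

	// strings.SplitSeq yields each piece lazily; ranging over it avoids
	// building the []string that strings.Split would have returned.
	for s := range strings.SplitSeq(input, ",") {
		if tr := strings.TrimSpace(s); tr != "" {
			result[tr] = struct{}{}
		}
	}

	fmt.Println(len(result)) // 3
}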
&mockQueryStreamServer{ctx: ctx} err = ing.QueryStream(&client.QueryRequest{ StartTimestampMs: math.MinInt64, @@ -212,7 +212,7 @@ func TestIngesterDeletionRace(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing)) defer services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, time.Second, ring.ACTIVE, func() any { return ing.lifecycler.GetState() }) @@ -220,7 +220,7 @@ func TestIngesterDeletionRace(t *testing.T) { wg := sync.WaitGroup{} wg.Add(numberOfTenants) - for i := 0; i < numberOfTenants; i++ { + for i := range numberOfTenants { go func() { defer wg.Done() u := fmt.Sprintf("userId_%v", i) @@ -236,8 +236,7 @@ func TestIngesterDeletionRace(t *testing.T) { wg.Wait() - ctx, c := context.WithCancel(context.Background()) - defer c() + ctx := t.Context() wg.Add(1) go func() { @@ -250,7 +249,7 @@ func TestIngesterDeletionRace(t *testing.T) { ing.closeAndDeleteIdleUserTSDBs(ctx) //nolint:errcheck }() - test.Poll(t, 5*time.Second, 0, func() interface{} { + test.Poll(t, 5*time.Second, 0, func() any { return len(ing.getTSDBUsers()) }) } @@ -295,7 +294,7 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) { require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing)) // Wait until it's ACTIVE - test.Poll(t, time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, time.Second, ring.ACTIVE, func() any { return ing.lifecycler.GetState() }) @@ -418,7 +417,7 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) { `), "cortex_ingester_usage_per_labelset", "cortex_ingester_limits_per_labelset", "cortex_discarded_samples_total", "cortex_discarded_samples_per_labelset_total")) // Adding 5 metrics with only 1 label - for i := 0; i < 5; i++ { + for i := range 5 { lbls := []string{labels.MetricName, "metric_name", "comp1", "compValue1"} _, err = ing.Push(ctx, cortexpb.ToWriteRequest( []labels.Labels{labels.FromStrings(append(lbls, "extraLabel", fmt.Sprintf("extraValue%v", i))...)}, samples, nil, nil, cortexpb.API)) @@ -427,7 +426,7 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) { // Adding 2 metrics with both labels (still below the limit) lbls := []string{labels.MetricName, "metric_name", "comp1", "compValue1", "comp2", "compValue2"} - for i := 0; i < 2; i++ { + for i := range 2 { _, err = ing.Push(ctx, cortexpb.ToWriteRequest( []labels.Labels{labels.FromStrings(append(lbls, "extraLabel", fmt.Sprintf("extraValue%v", i))...)}, samples, nil, nil, cortexpb.API)) require.NoError(t, err) @@ -533,7 +532,7 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) { tenantLimits.setLimits(userID, &limits) lbls = []string{labels.MetricName, "test_default"} - for i := 0; i < 2; i++ { + for i := range 2 { _, err = ing.Push(ctx, cortexpb.ToWriteRequest( []labels.Labels{labels.FromStrings(append(lbls, "series", strconv.Itoa(i))...)}, samples, nil, nil, cortexpb.API)) require.NoError(t, err) @@ -696,7 +695,7 @@ func TestPushRace(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing)) // Wait until it's ACTIVE - test.Poll(t, time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, time.Second, ring.ACTIVE, func() any { return ing.lifecycler.GetState() }) @@ -710,8 +709,8 @@ func TestPushRace(t *testing.T) { numberOfSeries := 100 wg := sync.WaitGroup{} 
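The `TestIngesterDeletionRace` hunk above drops a manual `context.WithCancel` plus deferred cancel in favour of `t.Context()` (Go 1.24), which returns a context that is canceled automatically just before the test's cleanup functions run. A minimal sketch; the helper under test is hypothetical:

package example

import (
	"context"
	"testing"
	"time"
)

// doWork is a stand-in for code under test that honours a context.
func doWork(ctx context.Context) error {
	select {
	case <-time.After(10 * time.Millisecond):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func TestWithTestContext(t *testing.T) {
	// t.Context() replaces:
	//   ctx, cancel := context.WithCancel(context.Background())
	//   defer cancel()
	// The returned context is canceled shortly before Cleanup runs.
	ctx := t.Context()

	if err := doWork(ctx); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}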
wg.Add(numberOfSeries * concurrentRequest) - for k := 0; k < numberOfSeries; k++ { - for i := 0; i < concurrentRequest; i++ { + for k := range numberOfSeries { + for range concurrentRequest { go func() { defer wg.Done() _, err := ing.Push(ctx, cortexpb.ToWriteRequest([]labels.Labels{labels.FromStrings(labels.MetricName, "foo", "userId", userID, "k", strconv.Itoa(k))}, []cortexpb.Sample{sample1}, nil, nil, cortexpb.API)) @@ -789,7 +788,7 @@ func TestIngesterUserLimitExceeded(t *testing.T) { require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing)) // Wait until it's ACTIVE - test.Poll(t, time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, time.Second, ring.ACTIVE, func() any { return ing.lifecycler.GetState() }) @@ -900,7 +899,7 @@ func TestIngesterUserLimitExceededForNativeHistogram(t *testing.T) { require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing)) // Wait until it's ACTIVE - test.Poll(t, time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, time.Second, ring.ACTIVE, func() any { return ing.lifecycler.GetState() }) @@ -957,7 +956,7 @@ func TestIngesterUserLimitExceededForNativeHistogram(t *testing.T) { } func benchmarkData(nSeries int) (allLabels []labels.Labels, allSamples []cortexpb.Sample) { - for j := 0; j < nSeries; j++ { + for j := range nSeries { lbls := chunk.BenchmarkLabels.Copy() builder := labels.NewBuilder(labels.EmptyLabels()) @@ -1015,7 +1014,7 @@ func TestIngesterMetricLimitExceeded(t *testing.T) { require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing)) // Wait until it's ACTIVE - test.Poll(t, time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, time.Second, ring.ACTIVE, func() any { return ing.lifecycler.GetState() }) @@ -2056,7 +2055,7 @@ func TestIngester_Push(t *testing.T) { ctx := user.InjectOrgID(context.Background(), userID) // Wait until the ingester is ACTIVE - test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} { + test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -2286,7 +2285,7 @@ func TestIngester_PushNativeHistogramErrors(t *testing.T) { ctx := user.InjectOrgID(context.Background(), userID) // Wait until the ingester is ACTIVE - test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} { + test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -2325,7 +2324,7 @@ func TestIngester_Push_ShouldCorrectlyTrackMetricsInMultiTenantScenario(t *testi defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until the ingester is ACTIVE - test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} { + test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -2409,7 +2408,7 @@ func TestIngester_Push_DecreaseInactiveSeries(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until the ingester is ACTIVE - test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} { + test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -2472,7 +2471,7 @@ func TestIngester_Push_OutOfOrderLabels(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, time.Second, ring.ACTIVE, func() any { return 
i.lifecycler.GetState() }) @@ -2517,7 +2516,7 @@ func benchmarkIngesterPush(b *testing.B, limits validation.Limits, errorsExpecte defer services.StopAndAwaitTerminated(context.Background(), ingester) //nolint:errcheck // Wait until the ingester is ACTIVE - test.Poll(b, 100*time.Millisecond, ring.ACTIVE, func() interface{} { + test.Poll(b, 100*time.Millisecond, ring.ACTIVE, func() any { return ingester.lifecycler.GetState() }) @@ -2542,10 +2541,9 @@ func benchmarkIngesterPush(b *testing.B, limits validation.Limits, errorsExpecte allLabels, allSamples := benchmarkData(series) - b.ResetTimer() - for iter := 0; iter < b.N; iter++ { + for iter := 0; b.Loop(); iter++ { // Bump the timestamp on each of our test samples each time round the loop - for j := 0; j < samples; j++ { + for j := range samples { for i := range allSamples { allSamples[i].TimestampMs = startTime + int64(iter*samples+j+1) } @@ -2617,7 +2615,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { expectedErr := storage.ErrOutOfBounds.Error() // Push out of bound samples. - for n := 0; n < b.N; n++ { + for b.Loop() { _, err := ingester.Push(ctx, cortexpb.ToWriteRequest(metrics, samples, nil, nil, cortexpb.API)) // nolint:errcheck verifyErrorString(b, err, expectedErr) @@ -2628,7 +2626,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool { return true }, beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) { // For each series, push a single sample with a timestamp greater than next pushes. - for i := 0; i < numSeriesPerRequest; i++ { + for i := range numSeriesPerRequest { currTimeReq := cortexpb.ToWriteRequest( []labels.Labels{labels.FromStrings(labels.MetricName, metricName, "cardinality", strconv.Itoa(i))}, []cortexpb.Sample{{Value: 1, TimestampMs: sampleTimestamp + 1}}, @@ -2644,7 +2642,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { expectedErr := storage.ErrOutOfOrderSample.Error() // Push out of order samples. - for n := 0; n < b.N; n++ { + for b.Loop() { _, err := ingester.Push(ctx, cortexpb.ToWriteRequest(metrics, samples, nil, nil, cortexpb.API)) // nolint:errcheck verifyErrorString(b, err, expectedErr) @@ -2669,7 +2667,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { }, runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) { // Push series with a different name than the one already pushed. - for n := 0; n < b.N; n++ { + for b.Loop() { _, err := ingester.Push(ctx, cortexpb.ToWriteRequest(metrics, samples, nil, nil, cortexpb.API)) // nolint:errcheck verifyErrorString(b, err, "per-user series limit") } @@ -2693,7 +2691,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { }, runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) { // Push series with different labels than the one already pushed. - for n := 0; n < b.N; n++ { + for b.Loop() { _, err := ingester.Push(ctx, cortexpb.ToWriteRequest(metrics, samples, nil, nil, cortexpb.API)) // nolint:errcheck verifyErrorString(b, err, "per-metric series limit") } @@ -2716,7 +2714,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { }, runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) { // Push series with different labels than the one already pushed. 
- for n := 0; n < b.N; n++ { + for b.Loop() { _, err := ingester.Push(ctx, cortexpb.ToWriteRequest(metrics, samples, nil, nil, cortexpb.API)) verifyErrorString(b, err, "push rate reached") } @@ -2738,7 +2736,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { }, runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) { // Push series with different labels than the one already pushed. - for n := 0; n < b.N; n++ { + for b.Loop() { _, err := ingester.Push(ctx, cortexpb.ToWriteRequest(metrics, samples, nil, nil, cortexpb.API)) verifyErrorString(b, err, "max tenants limit reached") } @@ -2757,7 +2755,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { require.NoError(b, err) }, runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) { - for n := 0; n < b.N; n++ { + for b.Loop() { _, err := ingester.Push(ctx, cortexpb.ToWriteRequest(metrics, samples, nil, nil, cortexpb.API)) verifyErrorString(b, err, "max series limit reached") } @@ -2775,7 +2773,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { ingester.inflightPushRequests.Inc() }, runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) { - for n := 0; n < b.N; n++ { + for b.Loop() { _, err := ingester.Push(ctx, cortexpb.ToWriteRequest(metrics, samples, nil, nil, cortexpb.API)) verifyErrorString(b, err, "too many inflight push requests") } @@ -2817,7 +2815,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { defer services.StopAndAwaitTerminated(context.Background(), ingester) //nolint:errcheck // Wait until the ingester is ACTIVE - test.Poll(b, 100*time.Millisecond, ring.ACTIVE, func() interface{} { + test.Poll(b, 100*time.Millisecond, ring.ACTIVE, func() any { return ingester.lifecycler.GetState() }) @@ -2837,7 +2835,6 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { start := make(chan struct{}) b.ReportAllocs() - b.ResetTimer() for c := 0; c < scenario.numConcurrentClients; c++ { go func() { @@ -2877,7 +2874,7 @@ func Test_Ingester_LabelNames(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -2938,7 +2935,7 @@ func Test_Ingester_LabelValues(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -2988,7 +2985,7 @@ func Test_Ingester_LabelValue_MaxInflightQueryRequest(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -3104,7 +3101,7 @@ func Test_Ingester_Query(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -3147,7 +3144,7 @@ func Test_Ingester_Query_MaxInflightQueryRequest(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // 
Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -3213,7 +3210,7 @@ func Test_Ingester_Query_ResourceThresholdBreached(t *testing.T) { require.NoError(t, err) // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -3515,7 +3512,7 @@ func Test_Ingester_MetricsForLabelMatchers(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -3530,7 +3527,6 @@ func Test_Ingester_MetricsForLabelMatchers(t *testing.T) { // Run tests for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { req := &client.MetricsForLabelMatchersRequest{ @@ -3598,10 +3594,9 @@ func Benchmark_Ingester_MetricsForLabelMatchers(b *testing.B) { // fetching labels from blocks. i.Flush() - b.ResetTimer() b.ReportAllocs() - for n := 0; n < b.N; n++ { + for b.Loop() { req := &client.MetricsForLabelMatchersRequest{ StartTimestampMs: math.MinInt64, EndTimestampMs: math.MaxInt64, @@ -3629,7 +3624,7 @@ func createIngesterWithSeries(t testing.TB, userID string, numSeries, numSamples }) // Wait until it's ACTIVE. - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -3644,7 +3639,7 @@ func createIngesterWithSeries(t testing.TB, userID string, numSeries, numSamples metrics := make([]labels.Labels, 0, batchSize) samples := make([]cortexpb.Sample, 0, batchSize) - for s := 0; s < batchSize; s++ { + for s := range batchSize { metrics = append(metrics, labels.FromStrings("__name__", fmt.Sprintf("test_%d", o+s))) samples = append(samples, cortexpb.Sample{ TimestampMs: ts, @@ -3674,7 +3669,7 @@ func TestIngester_QueryStream(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE. - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -3758,7 +3753,7 @@ func TestIngester_QueryStreamManySamplesChunks(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE. - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -3768,7 +3763,7 @@ func TestIngester_QueryStreamManySamplesChunks(t *testing.T) { const samplesCount = 1000000 samples := make([]cortexpb.Sample, 0, samplesCount) - for i := 0; i < samplesCount; i++ { + for i := range samplesCount { samples = append(samples, cortexpb.Sample{ Value: float64(i), TimestampMs: int64(i), @@ -3955,7 +3950,7 @@ func benchmarkQueryStream(b *testing.B, samplesCount, seriesCount int) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE. 
- test.Poll(b, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(b, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -3964,14 +3959,14 @@ func benchmarkQueryStream(b *testing.B, samplesCount, seriesCount int) { samples := make([]cortexpb.Sample, 0, samplesCount) - for i := 0; i < samplesCount; i++ { + for i := range samplesCount { samples = append(samples, cortexpb.Sample{ Value: float64(i), TimestampMs: int64(i), }) } - for s := 0; s < seriesCount; s++ { + for s := range seriesCount { _, err = i.Push(ctx, writeRequestSingleSeries(labels.FromStrings("__name__", "foo", "l", strconv.Itoa(s)), samples)) require.NoError(b, err) } @@ -3989,10 +3984,9 @@ func benchmarkQueryStream(b *testing.B, samplesCount, seriesCount int) { mockStream := &mockQueryStreamServer{ctx: ctx} - b.ResetTimer() b.ReportAllocs() - for ix := 0; ix < b.N; ix++ { + for b.Loop() { err := i.QueryStream(req, mockStream) require.NoError(b, err) } @@ -4335,7 +4329,7 @@ func TestIngester_shipBlocks(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -4381,7 +4375,7 @@ func TestIngester_dontShipBlocksWhenTenantDeletionMarkerIsPresent(t *testing.T) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -4434,7 +4428,7 @@ func TestIngester_seriesCountIsCorrectAfterClosingTSDBForDeletedTenant(t *testin defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -4472,7 +4466,7 @@ func TestIngester_sholdUpdateCacheShippedBlocks(t *testing.T) { defer services.StopAndAwaitTerminated(ctx, i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -4512,7 +4506,7 @@ func TestIngester_closeAndDeleteUserTSDBIfIdle_shouldNotCloseTSDBIfShippingIsInP defer services.StopAndAwaitTerminated(ctx, i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -4533,7 +4527,7 @@ func TestIngester_closeAndDeleteUserTSDBIfIdle_shouldNotCloseTSDBIfShippingIsInP go i.shipBlocks(ctx, nil) // Wait until shipping starts. 
- test.Poll(t, 1*time.Second, activeShipping, func() interface{} { + test.Poll(t, 1*time.Second, activeShipping, func() any { db.stateMtx.RLock() defer db.stateMtx.RUnlock() return db.state @@ -4554,7 +4548,7 @@ func TestIngester_closingAndOpeningTsdbConcurrently(t *testing.T) { defer services.StopAndAwaitTerminated(ctx, i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -4579,7 +4573,7 @@ func TestIngester_closingAndOpeningTsdbConcurrently(t *testing.T) { } }() - for k := 0; k < iterations; k++ { + for range iterations { i.closeAndDeleteUserTSDBIfIdle(userID) } @@ -4607,7 +4601,7 @@ func TestIngester_idleCloseEmptyTSDB(t *testing.T) { defer services.StopAndAwaitTerminated(ctx, i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -4656,7 +4650,7 @@ func TestIngester_ReadNotFailWhenTSDBIsBeingDeleted(t *testing.T) { defer services.StopAndAwaitTerminated(ctx, i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -4712,7 +4706,7 @@ func TestIngester_invalidSamplesDontChangeLastUpdateTime(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -4730,7 +4724,7 @@ func TestIngester_invalidSamplesDontChangeLastUpdateTime(t *testing.T) { lastUpdate := db.lastUpdate.Load() // Wait until 1 second passes. 
- test.Poll(t, 1*time.Second, time.Now().Unix()+1, func() interface{} { + test.Poll(t, 1*time.Second, time.Now().Unix()+1, func() any { return time.Now().Unix() }) @@ -4951,7 +4945,7 @@ func TestIngester_flushing(t *testing.T) { }) // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -4978,7 +4972,7 @@ func TestIngester_ForFlush(t *testing.T) { }) // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -5047,7 +5041,7 @@ func Test_Ingester_UserStats(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -5095,7 +5089,7 @@ func Test_Ingester_AllUserStats(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) for _, series := range series { @@ -5163,7 +5157,7 @@ func Test_Ingester_AllUserStatsHandler(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) for _, series := range series { @@ -5239,7 +5233,7 @@ func TestIngesterCompactIdleBlock(t *testing.T) { }) // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -5321,7 +5315,7 @@ func TestIngesterCompactAndCloseIdleTSDB(t *testing.T) { }) // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -5362,7 +5356,7 @@ func TestIngesterCompactAndCloseIdleTSDB(t *testing.T) { `), metricsToCheck...)) // Wait until TSDB has been closed and removed. - test.Poll(t, 10*time.Second, 0, func() interface{} { + test.Poll(t, 10*time.Second, 0, func() any { i.stoppedMtx.Lock() defer i.stoppedMtx.Unlock() return len(i.TSDBState.dbs) @@ -5466,7 +5460,7 @@ func TestHeadCompactionOnStartup(t *testing.T) { head := db.Head() l := labels.FromStrings("n", "v") - for i := 0; i < numFullChunks; i++ { + for i := range numFullChunks { // Not using db.Appender() as it checks for compaction. 
app := head.Appender(context.Background()) _, err := app.Append(0, l, int64(i)*chunkRange+1, 9.99) @@ -5523,7 +5517,7 @@ func TestIngester_CloseTSDBsOnShutdown(t *testing.T) { }) // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -5562,7 +5556,7 @@ func TestIngesterNotDeleteUnshippedBlocks(t *testing.T) { }) // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -5574,7 +5568,7 @@ func TestIngesterNotDeleteUnshippedBlocks(t *testing.T) { // Push some data to create 3 blocks. ctx := user.InjectOrgID(context.Background(), userID) - for j := int64(0); j < 5; j++ { + for j := range int64(5) { req, _ := mockWriteRequest(t, labels.FromStrings(labels.MetricName, "test"), 0, j*chunkRangeMilliSec) _, err := i.Push(ctx, req) require.NoError(t, err) @@ -5664,7 +5658,7 @@ func TestIngesterPushErrorDuringForcedCompaction(t *testing.T) { }) // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -5699,12 +5693,12 @@ func TestIngesterNoFlushWithInFlightRequest(t *testing.T) { }) // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) // Push few samples. - for j := 0; j < 5; j++ { + for range 5 { pushSingleSampleWithMetadata(t, i) } @@ -5731,7 +5725,7 @@ func TestIngesterNoFlushWithInFlightRequest(t *testing.T) { db.releaseAppendLock() // Let's wait until all head series have been flushed. 
- test.Poll(t, 5*time.Second, uint64(0), func() interface{} { + test.Poll(t, 5*time.Second, uint64(0), func() any { db, err := i.getTSDB(userID) if err != nil || db == nil { return false @@ -5751,7 +5745,7 @@ func TestIngester_PushInstanceLimits(t *testing.T) { limits InstanceLimits reqs map[string][]*cortexpb.WriteRequest expectedErr error - expectedErrType interface{} + expectedErrType any }{ "should succeed creating one user and series": { limits: InstanceLimits{MaxInMemorySeries: 1, MaxInMemoryTenants: 1}, @@ -5860,7 +5854,7 @@ func TestIngester_PushInstanceLimits(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until the ingester is ACTIVE - test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} { + test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -5970,16 +5964,16 @@ func TestExpendedPostingsCacheIsolation(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until the ingester is ACTIVE - test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} { + test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) numberOfTenants := 100 wg := sync.WaitGroup{} - for k := 0; k < 10; k++ { + for k := range 10 { wg.Add(numberOfTenants) - for j := 0; j < numberOfTenants; j++ { + for j := range numberOfTenants { go func() { defer wg.Done() userId := fmt.Sprintf("user%v", j) @@ -5993,7 +5987,7 @@ func TestExpendedPostingsCacheIsolation(t *testing.T) { } wg.Add(numberOfTenants) - for j := 0; j < numberOfTenants; j++ { + for j := range numberOfTenants { go func() { defer wg.Done() userId := fmt.Sprintf("user%v", j) @@ -6031,7 +6025,7 @@ func TestExpendedPostingsCacheMatchers(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck // Wait until the ingester is ACTIVE - test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} { + test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() any { return ing.lifecycler.GetState() }) @@ -6040,9 +6034,9 @@ func TestExpendedPostingsCacheMatchers(t *testing.T) { timeStamp := int64(60 * 1000) seriesCreated := map[string]labels.Labels{} - for i := 0; i < numberOfMetricNames; i++ { + for i := range numberOfMetricNames { metricName := fmt.Sprintf("metric_%v", i) - for j := 0; j < seriesPerMetricsNames; j++ { + for j := range seriesPerMetricsNames { s := labels.FromStrings(labels.MetricName, metricName, "labelA", fmt.Sprintf("series_%v", j)) _, err = ing.Push(ctx, cortexpb.ToWriteRequest([]labels.Labels{s}, []cortexpb.Sample{{Value: 2, TimestampMs: timeStamp}}, nil, nil, cortexpb.API)) seriesCreated[s.String()] = s @@ -6065,7 +6059,7 @@ func TestExpendedPostingsCacheMatchers(t *testing.T) { Value: "metric_0", } - for i := 0; i < 4; i++ { + for i := range 4 { tc := testCase{ matchers: []*client.LabelMatcher{nameMatcher}, } @@ -6196,7 +6190,7 @@ func TestExpendedPostingsCacheMatchers(t *testing.T) { db.postingCache.Clear() // lets run 2 times to hit the cache - for i := 0; i < 2; i++ { + for range 2 { verify(t, tc, r.startTs, r.endTs, r.hasSamples) } @@ -6322,7 +6316,7 @@ func TestExpendedPostingsCache(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until the ingester is ACTIVE - test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} { + test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ 
-6332,7 +6326,7 @@ func TestExpendedPostingsCache(t *testing.T) { totalSamples := 4 * 60 var samples = make([]cortexpb.Sample, 0, totalSamples) - for i := 0; i < totalSamples; i++ { + for i := range totalSamples { samples = append(samples, cortexpb.Sample{ Value: float64(i), TimestampMs: int64(i * 60 * 1000), @@ -6340,7 +6334,7 @@ func TestExpendedPostingsCache(t *testing.T) { } lbls := make([]labels.Labels, 0, len(samples)) - for j := 0; j < 10; j++ { + for j := range 10 { for i := 0; i < len(samples); i++ { lbls = append(lbls, labels.FromStrings(labels.MetricName, metricNames[i%len(metricNames)], "a", fmt.Sprintf("aaa%v", j))) } @@ -6498,7 +6492,7 @@ func TestExpendedPostingsCache(t *testing.T) { require.Equal(t, int64(0), postingsForMatchersCalls.Load()) if c.shouldExpireDueInactivity { - test.Poll(t, c.cacheConfig.Blocks.Ttl+c.cacheConfig.Head.Ttl+cfg.BlocksStorageConfig.TSDB.ExpandedCachingExpireInterval, 0, func() interface{} { + test.Poll(t, c.cacheConfig.Blocks.Ttl+c.cacheConfig.Head.Ttl+cfg.BlocksStorageConfig.TSDB.ExpandedCachingExpireInterval, 0, func() any { size := 0 for _, userID := range i.getTSDBUsers() { userDB, _ := i.getTSDB(userID) @@ -6526,7 +6520,7 @@ func TestIngester_inflightPushRequests(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until the ingester is ACTIVE - test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} { + test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -6603,7 +6597,7 @@ func Test_Ingester_QueryExemplar_MaxInflightQueryRequest(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -6626,7 +6620,7 @@ func generateSamplesForLabel(l labels.Labels, count int, sampleIntervalInMs int) var lbls = make([]labels.Labels, 0, count) var samples = make([]cortexpb.Sample, 0, count) - for i := 0; i < count; i++ { + for i := range count { samples = append(samples, cortexpb.Sample{ Value: float64(i), TimestampMs: int64(i * sampleIntervalInMs), @@ -6732,7 +6726,7 @@ func Test_Ingester_ModeHandler(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -6741,7 +6735,7 @@ func Test_Ingester_ModeHandler(t *testing.T) { require.NoError(t, err) // Wait until initial state - test.Poll(t, 1*time.Second, testData.initialState, func() interface{} { + test.Poll(t, 1*time.Second, testData.initialState, func() any { return i.lifecycler.GetState() }) } @@ -6757,7 +6751,7 @@ func Test_Ingester_ModeHandler(t *testing.T) { require.Equal(t, testData.expectedState, i.lifecycler.GetState()) if testData.expectedIsReady { // Wait for instance to own tokens - test.Poll(t, 1*time.Second, nil, func() interface{} { + test.Poll(t, 1*time.Second, nil, func() any { return i.CheckReady(context.Background()) }) require.NoError(t, i.CheckReady(context.Background())) @@ -6992,7 +6986,7 @@ func TestIngester_UpdateLabelSetMetrics(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, time.Second, 
ring.ACTIVE, func() any { return i.lifecycler.GetState() }) // Add user ID. @@ -7054,7 +7048,7 @@ func TestIngesterPanicHandling(t *testing.T) { defer services.StopAndAwaitTerminated(ctx, i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) diff --git a/pkg/ingester/instance_limits.go b/pkg/ingester/instance_limits.go index cb48df3687e..cea165dd2f2 100644 --- a/pkg/ingester/instance_limits.go +++ b/pkg/ingester/instance_limits.go @@ -38,7 +38,7 @@ func (cfg *InstanceLimits) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix strin } // UnmarshalYAML implements the yaml.Unmarshaler interface. If give -func (l *InstanceLimits) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (l *InstanceLimits) UnmarshalYAML(unmarshal func(any) error) error { if defaultInstanceLimits != nil { *l = *defaultInstanceLimits } diff --git a/pkg/ingester/lifecycle_test.go b/pkg/ingester/lifecycle_test.go index 4fab7d716e0..a0fd0ad868b 100644 --- a/pkg/ingester/lifecycle_test.go +++ b/pkg/ingester/lifecycle_test.go @@ -73,7 +73,7 @@ func TestIngesterRestart(t *testing.T) { require.NoError(t, services.StopAndAwaitTerminated(context.Background(), ingester)) } - test.Poll(t, 100*time.Millisecond, 1, func() interface{} { + test.Poll(t, 100*time.Millisecond, 1, func() any { return numTokens(config.LifecyclerConfig.RingConfig.KVStore.Mock, "localhost", RingKey) }) @@ -88,7 +88,7 @@ func TestIngesterRestart(t *testing.T) { time.Sleep(200 * time.Millisecond) - test.Poll(t, 100*time.Millisecond, 1, func() interface{} { + test.Poll(t, 100*time.Millisecond, 1, func() any { return numTokens(config.LifecyclerConfig.RingConfig.KVStore.Mock, "localhost", RingKey) }) } @@ -104,7 +104,7 @@ func TestIngester_ShutdownHandler(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), ingester)) // Make sure the ingester has been added to the ring. - test.Poll(t, 100*time.Millisecond, 1, func() interface{} { + test.Poll(t, 100*time.Millisecond, 1, func() any { return numTokens(config.LifecyclerConfig.RingConfig.KVStore.Mock, "localhost", RingKey) }) @@ -113,7 +113,7 @@ func TestIngester_ShutdownHandler(t *testing.T) { require.Equal(t, http.StatusNoContent, recorder.Result().StatusCode) // Make sure the ingester has been removed from the ring even when UnregisterFromRing is false. 
- test.Poll(t, 100*time.Millisecond, 0, func() interface{} { + test.Poll(t, 100*time.Millisecond, 0, func() any { return numTokens(config.LifecyclerConfig.RingConfig.KVStore.Mock, "localhost", RingKey) }) }) diff --git a/pkg/ingester/limiter_test.go b/pkg/ingester/limiter_test.go index 82a53f9dbd9..b0601dbc495 100644 --- a/pkg/ingester/limiter_test.go +++ b/pkg/ingester/limiter_test.go @@ -221,7 +221,6 @@ func runLimiterMaxFunctionTest( } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { // Mock the ring @@ -288,7 +287,6 @@ func TestLimiter_AssertMaxSeriesPerMetric(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { // Mock the ring @@ -349,7 +347,6 @@ func TestLimiter_AssertMaxMetadataPerMetric(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { // Mock the ring @@ -411,7 +408,6 @@ func TestLimiter_AssertMaxSeriesPerUser(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { // Mock the ring @@ -473,7 +469,6 @@ func TestLimiter_AssertMaxNativeHistogramsSeriesPerUser(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { // Mock the ring @@ -557,7 +552,6 @@ func TestLimiter_AssertMaxSeriesPerLabelSet(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { // Mock the ring @@ -618,7 +612,6 @@ func TestLimiter_AssertMaxMetricsWithMetadataPerUser(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { // Mock the ring @@ -714,7 +707,6 @@ func TestLimiter_minNonZero(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { assert.Equal(t, testData.expected, minNonZero(testData.first, testData.second)) diff --git a/pkg/ingester/user_state.go b/pkg/ingester/user_state.go index 032c6907d8c..2918c8993aa 100644 --- a/pkg/ingester/user_state.go +++ b/pkg/ingester/user_state.go @@ -38,7 +38,7 @@ type metricCounter struct { func newMetricCounter(limiter *Limiter, ignoredMetricsForSeriesCount map[string]struct{}) *metricCounter { shards := make([]metricCounterShard, 0, numMetricCounterShards) - for i := 0; i < numMetricCounterShards; i++ { + for range numMetricCounterShards { shards = append(shards, metricCounterShard{ m: map[string]int{}, }) @@ -103,7 +103,7 @@ type labelSetCounter struct { func newLabelSetCounter(limiter *Limiter) *labelSetCounter { shards := make([]*labelSetCounterShard, 0, numMetricCounterShards) - for i := 0; i < numMetricCounterShards; i++ { + for range numMetricCounterShards { shards = append(shards, &labelSetCounterShard{ RWMutex: &sync.RWMutex{}, valuesCounter: map[uint64]*labelSetCounterEntry{}, @@ -252,7 +252,7 @@ func (m *labelSetCounter) UpdateMetric(ctx context.Context, u *userTSDB, metrics } nonDefaultPartitionChanged := false - for i := 0; i < numMetricCounterShards; i++ { + for i := range numMetricCounterShards { s := m.shards[i] s.RLock() for h, entry := range s.valuesCounter { diff --git a/pkg/parquetconverter/converter_test.go b/pkg/parquetconverter/converter_test.go index 70b6469a7ba..fbaa947b95e 100644 --- a/pkg/parquetconverter/converter_test.go +++ b/pkg/parquetconverter/converter_test.go @@ -89,7 +89,7 @@ func TestConverter(t *testing.T) { blocksConverted := []ulid.ULID{} - 
test.Poll(t, 3*time.Minute, 1, func() interface{} { + test.Poll(t, 3*time.Minute, 1, func() any { blocksConverted = blocksConverted[:0] for _, bIds := range blocks { m, err := parquet.ReadConverterMark(ctx, bIds, userBucket, logger) @@ -128,12 +128,12 @@ func TestConverter(t *testing.T) { require.NoError(t, cortex_tsdb.WriteTenantDeletionMark(context.Background(), objstore.WithNoopInstr(bucketClient), user, cortex_tsdb.NewTenantDeletionMark(time.Now()))) // Should clean sync folders - test.Poll(t, time.Minute, 0, func() interface{} { + test.Poll(t, time.Minute, 0, func() any { return len(c.listTenantsWithMetaSyncDirectories()) }) // Verify metrics after user deletion - test.Poll(t, time.Minute*10, true, func() interface{} { + test.Poll(t, time.Minute*10, true, func() any { if testutil.ToFloat64(c.metrics.convertedBlocks.WithLabelValues(user)) != 0.0 { return false } diff --git a/pkg/querier/batch/batch.go b/pkg/querier/batch/batch.go index 79dfe8081e3..af645d14dc0 100644 --- a/pkg/querier/batch/batch.go +++ b/pkg/querier/batch/batch.go @@ -55,7 +55,6 @@ type iterator interface { func NewChunkMergeIterator(it chunkenc.Iterator, chunks []chunk.Chunk, _, _ model.Time) chunkenc.Iterator { converted := make([]GenericChunk, len(chunks)) for i, c := range chunks { - c := c converted[i] = NewGenericChunk(int64(c.From), int64(c.Through), c.NewIterator) } @@ -141,10 +140,7 @@ func (a *iteratorAdapter) Next() chunkenc.ValueType { a.curr.Index++ for a.curr.Index >= a.curr.Length && a.underlying.Next(a.batchSize) != chunkenc.ValNone { a.curr = a.underlying.Batch() - a.batchSize = a.batchSize * 2 - if a.batchSize > chunk.BatchSize { - a.batchSize = chunk.BatchSize - } + a.batchSize = min(a.batchSize*2, chunk.BatchSize) } if a.curr.Index < a.curr.Length { return a.curr.ValType diff --git a/pkg/querier/batch/batch_test.go b/pkg/querier/batch/batch_test.go index 4f4b57bfe4d..d90a0e1033e 100644 --- a/pkg/querier/batch/batch_test.go +++ b/pkg/querier/batch/batch_test.go @@ -51,12 +51,11 @@ func BenchmarkNewChunkMergeIterator_CreateAndIterate(b *testing.B) { chunks := createChunks(b, step, scenario.numChunks, scenario.numSamplesPerChunk, scenario.duplicationFactor, scenario.enc) - b.ResetTimer() b.Run(name, func(b *testing.B) { b.ReportAllocs() var it chunkenc.Iterator - for n := 0; n < b.N; n++ { + for b.Loop() { it = NewChunkMergeIterator(it, chunks, 0, 0) for it.Next() != chunkenc.ValNone { it.At() @@ -106,11 +105,10 @@ func BenchmarkNewChunkMergeIterator_Seek(b *testing.B) { chunks := createChunks(b, scenario.scrapeInterval, scenario.numChunks, scenario.numSamplesPerChunk, scenario.duplicationFactor, scenario.enc) - b.ResetTimer() b.Run(name, func(b *testing.B) { b.ReportAllocs() var it chunkenc.Iterator - for n := 0; n < b.N; n++ { + for b.Loop() { it = NewChunkMergeIterator(it, chunks, 0, 0) i := int64(0) for it.Seek(i*scenario.seekStep.Milliseconds()) != chunkenc.ValNone { @@ -164,8 +162,8 @@ func TestSeekCorrectlyDealWithSinglePointChunks(t *testing.T) { func createChunks(b *testing.B, step time.Duration, numChunks, numSamplesPerChunk, duplicationFactor int, enc promchunk.Encoding) []chunk.Chunk { result := make([]chunk.Chunk, 0, numChunks) - for d := 0; d < duplicationFactor; d++ { - for c := 0; c < numChunks; c++ { + for range duplicationFactor { + for c := range numChunks { minTime := step * time.Duration(c*numSamplesPerChunk) result = append(result, util.GenerateChunk(b, step, model.Time(minTime.Milliseconds()), numSamplesPerChunk, enc)) } diff --git a/pkg/querier/batch/chunk_test.go 
b/pkg/querier/batch/chunk_test.go index becb4e7dff9..623de16601e 100644 --- a/pkg/querier/batch/chunk_test.go +++ b/pkg/querier/batch/chunk_test.go @@ -39,7 +39,6 @@ func forEncodings(t *testing.T, f func(t *testing.T, enc promchunk.Encoding)) { promchunk.PrometheusHistogramChunk, //promchunk.PrometheusFloatHistogramChunk, } { - enc := enc t.Run(enc.String(), func(t *testing.T) { t.Parallel() f(t, enc) @@ -55,7 +54,7 @@ func mkGenericChunk(t require.TestingT, from model.Time, points int, enc promchu func testIter(t require.TestingT, points int, iter chunkenc.Iterator, enc promchunk.Encoding) { histograms := histogram_util.GenerateTestHistograms(0, 1000, points) ets := model.TimeFromUnix(0) - for i := 0; i < points; i++ { + for i := range points { require.Equal(t, iter.Next(), enc.ChunkValueType(), strconv.Itoa(i)) switch enc { case promchunk.PrometheusXorChunk: @@ -132,7 +131,7 @@ func TestSeek(t *testing.T) { it: &it, } - for i := 0; i < chunk.BatchSize-1; i++ { + for i := range chunk.BatchSize - 1 { require.Equal(t, chunkenc.ValFloat, c.Seek(int64(i), 1)) } require.Equal(t, 1, it.seeks) @@ -159,7 +158,7 @@ func (i *mockIterator) Batch(size int, valType chunkenc.ValueType) chunk.Batch { Length: chunk.BatchSize, ValType: valType, } - for i := 0; i < chunk.BatchSize; i++ { + for i := range chunk.BatchSize { batch.Timestamps[i] = int64(i) } return batch diff --git a/pkg/querier/batch/merge.go b/pkg/querier/batch/merge.go index 27030149d21..33c0f91787e 100644 --- a/pkg/querier/batch/merge.go +++ b/pkg/querier/batch/merge.go @@ -70,12 +70,12 @@ func (c *mergeIterator) Reset(size int) *mergeIterator { c.batchesBuf = make(batchStream, len(c.its)) } else { c.batchesBuf = c.batchesBuf[:size] - for i := 0; i < size; i++ { + for i := range size { c.batchesBuf[i] = promchunk.Batch{} } } - for i := 0; i < len(c.nextBatchBuf); i++ { + for i := range len(c.nextBatchBuf) { c.nextBatchBuf[i] = promchunk.Batch{} } @@ -192,11 +192,11 @@ func (h *iteratorHeap) Less(i, j int) bool { return iT < jT } -func (h *iteratorHeap) Push(x interface{}) { +func (h *iteratorHeap) Push(x any) { *h = append(*h, x.(iterator)) } -func (h *iteratorHeap) Pop() interface{} { +func (h *iteratorHeap) Pop() any { old := *h n := len(old) x := old[n-1] diff --git a/pkg/querier/batch/merge_test.go b/pkg/querier/batch/merge_test.go index d835640d704..a7ab54b94b8 100644 --- a/pkg/querier/batch/merge_test.go +++ b/pkg/querier/batch/merge_test.go @@ -30,16 +30,15 @@ func TestMergeIter(t *testing.T) { func BenchmarkMergeIterator(b *testing.B) { chunks := make([]GenericChunk, 0, 10) - for i := 0; i < 10; i++ { + for i := range 10 { chunks = append(chunks, mkGenericChunk(b, model.Time(i*25), 120, encoding.PrometheusXorChunk)) } iter := newMergeIterator(nil, chunks) for _, r := range []bool{true, false} { b.Run(fmt.Sprintf("reuse-%t", r), func(b *testing.B) { - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { if r { iter = newMergeIterator(iter, chunks) } else { @@ -64,7 +63,7 @@ func TestMergeHarder(t *testing.T) { offset = 30 samples = 100 ) - for i := 0; i < numChunks; i++ { + for range numChunks { chunks = append(chunks, mkGenericChunk(t, from, samples, enc)) from = from.Add(time.Duration(offset) * time.Second) } diff --git a/pkg/querier/batch/non_overlapping_test.go b/pkg/querier/batch/non_overlapping_test.go index 2377e8c3fa4..7fc44086665 100644 --- a/pkg/querier/batch/non_overlapping_test.go +++ b/pkg/querier/batch/non_overlapping_test.go @@ -12,7 +12,7 @@ func TestNonOverlappingIter(t *testing.T) { 
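The benchmark hunks above replace the classic `b.ResetTimer()` plus `for n := 0; n < b.N; n++` idiom with `for b.Loop()`, introduced in Go 1.24; `B.Loop` manages the timer around the loop body itself, which is why the explicit `b.ResetTimer()` calls become unnecessary. An illustrative benchmark in the same shape (names are made up, not from this repo):

```go
package bench_test

import (
	"strings"
	"testing"
)

// BenchmarkJoin is a self-contained sketch of the Go 1.24 b.Loop() form:
// setup before the loop runs once and is excluded from the measured time,
// so no explicit b.ResetTimer() is required.
func BenchmarkJoin(b *testing.B) {
	parts := []string{"a", "b", "c", "d"} // setup, not timed

	b.ReportAllocs()
	for b.Loop() {
		_ = strings.Join(parts, ",")
	}
}
```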
t.Parallel() forEncodings(t, func(t *testing.T, enc encoding.Encoding) { cs := []GenericChunk(nil) - for i := int64(0); i < 100; i++ { + for i := range int64(100) { cs = append(cs, mkGenericChunk(t, model.TimeFromUnix(i*10), 10, enc)) } testIter(t, 10*100, newIteratorAdapter(newNonOverlappingIterator(cs)), enc) diff --git a/pkg/querier/batch/stream_test.go b/pkg/querier/batch/stream_test.go index 2274cf7aa07..41148e890f9 100644 --- a/pkg/querier/batch/stream_test.go +++ b/pkg/querier/batch/stream_test.go @@ -47,7 +47,6 @@ func TestStream(t *testing.T) { output: []promchunk.Batch{mkBatch(0, enc), mkBatch(promchunk.BatchSize, enc), mkBatch(2*promchunk.BatchSize, enc), mkBatch(3*promchunk.BatchSize, enc)}, }, } { - tc := tc t.Run(strconv.Itoa(i), func(t *testing.T) { t.Parallel() result := make(batchStream, len(tc.input1)+len(tc.input2)) @@ -60,7 +59,7 @@ func TestStream(t *testing.T) { func mkBatch(from int64, enc encoding.Encoding) promchunk.Batch { var result promchunk.Batch - for i := int64(0); i < promchunk.BatchSize; i++ { + for i := range int64(promchunk.BatchSize) { result.Timestamps[i] = from + i switch enc { case encoding.PrometheusXorChunk: @@ -91,13 +90,13 @@ func testHistogram(count, numSpans, numBuckets int) *histogram.Histogram { NegativeBuckets: make([]int64, bucketsPerSide), PositiveBuckets: make([]int64, bucketsPerSide), } - for j := 0; j < numSpans; j++ { + for j := range numSpans { s := histogram.Span{Offset: 1, Length: spanLength} h.NegativeSpans[j] = s h.PositiveSpans[j] = s } - for j := 0; j < bucketsPerSide; j++ { + for j := range bucketsPerSide { h.NegativeBuckets[j] = 1 h.PositiveBuckets[j] = 1 } diff --git a/pkg/querier/blocks_consistency_checker_test.go b/pkg/querier/blocks_consistency_checker_test.go index 2b3bce3bcf7..5da829dbaf9 100644 --- a/pkg/querier/blocks_consistency_checker_test.go +++ b/pkg/querier/blocks_consistency_checker_test.go @@ -103,7 +103,6 @@ func TestBlocksConsistencyChecker_Check(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() diff --git a/pkg/querier/blocks_finder_bucket_index_test.go b/pkg/querier/blocks_finder_bucket_index_test.go index 99675d4748f..d5404dbc8b3 100644 --- a/pkg/querier/blocks_finder_bucket_index_test.go +++ b/pkg/querier/blocks_finder_bucket_index_test.go @@ -121,7 +121,6 @@ func TestBucketIndexBlocksFinder_GetBlocks(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() @@ -162,9 +161,7 @@ func BenchmarkBucketIndexBlocksFinder_GetBlocks(b *testing.B) { require.NoError(b, bucketindex.WriteIndex(ctx, bkt, userID, nil, idx)) finder := prepareBucketIndexBlocksFinder(b, bkt) - b.ResetTimer() - - for n := 0; n < b.N; n++ { + for b.Loop() { blocks, marks, err := finder.GetBlocks(ctx, userID, 100, 200, nil) if err != nil || len(blocks) != 11 || len(marks) != 11 { b.Fail() diff --git a/pkg/querier/blocks_finder_bucket_scan.go b/pkg/querier/blocks_finder_bucket_scan.go index d047fd1421f..aef1543cc9e 100644 --- a/pkg/querier/blocks_finder_bucket_scan.go +++ b/pkg/querier/blocks_finder_bucket_scan.go @@ -2,6 +2,7 @@ package querier import ( "context" + "maps" "path" "path/filepath" "sort" @@ -257,17 +258,11 @@ pushJobsLoop: } else { // If an error occurred, we prefer to partially update the metas map instead of // not updating it at all. At least we'll update blocks for the successful tenants. 
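Many of the loop rewrites above and below use Go 1.22's range-over-integer form: `for i := range n` iterates i = 0 through n-1, and `for range n` is used when the index is unused, which is what replaces counters such as `for c := 0; c < numChunks; c++`. A minimal sketch:

```go
package main

import "fmt"

func main() {
	const numShards = 3

	// Equivalent to: for i := 0; i < numShards; i++ { ... }
	for i := range numShards {
		fmt.Println("shard", i)
	}

	// When the index is not needed the variable can be dropped entirely.
	count := 0
	for range numShards {
		count++
	}
	fmt.Println("iterations:", count)
}
```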
- for userID, metas := range resMetas { - d.userMetas[userID] = metas - } + maps.Copy(d.userMetas, resMetas) - for userID, metas := range resMetasLookup { - d.userMetasLookup[userID] = metas - } + maps.Copy(d.userMetasLookup, resMetasLookup) - for userID, deletionMarks := range resDeletionMarks { - d.userDeletionMarks[userID] = deletionMarks - } + maps.Copy(d.userDeletionMarks, resDeletionMarks) } d.userMx.Unlock() diff --git a/pkg/querier/blocks_finder_bucket_scan_test.go b/pkg/querier/blocks_finder_bucket_scan_test.go index b81f6d7f910..9313afffdf4 100644 --- a/pkg/querier/blocks_finder_bucket_scan_test.go +++ b/pkg/querier/blocks_finder_bucket_scan_test.go @@ -502,7 +502,6 @@ func TestBucketScanBlocksFinder_GetBlocks(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() diff --git a/pkg/querier/blocks_store_balanced_set.go b/pkg/querier/blocks_store_balanced_set.go index b69f9cf4392..3967df03e40 100644 --- a/pkg/querier/blocks_store_balanced_set.go +++ b/pkg/querier/blocks_store_balanced_set.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "math/rand" + "slices" "strings" "time" @@ -16,7 +17,6 @@ import ( "github.com/thanos-io/thanos/pkg/extprom" "github.com/cortexproject/cortex/pkg/ring/client" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -94,7 +94,7 @@ func (s *blocksStoreBalancedSet) GetClientsFor(_ string, blockIDs []ulid.ULID, e func getFirstNonExcludedAddr(addresses, exclude []string) string { for _, addr := range addresses { - if !util.StringsContain(exclude, addr) { + if !slices.Contains(exclude, addr) { return addr } } diff --git a/pkg/querier/blocks_store_balanced_set_test.go b/pkg/querier/blocks_store_balanced_set_test.go index 6de7a105fd6..0af9719eb37 100644 --- a/pkg/querier/blocks_store_balanced_set_test.go +++ b/pkg/querier/blocks_store_balanced_set_test.go @@ -33,7 +33,7 @@ func TestBlocksStoreBalancedSet_GetClientsFor(t *testing.T) { // of returned clients (we expect an even distribution). clientsCount := map[string]int{} - for i := 0; i < numGets; i++ { + for range numGets { clients, err := s.GetClientsFor("", []ulid.ULID{block1}, map[ulid.ULID][]string{}, nil) require.NoError(t, err) require.Len(t, clients, 1) @@ -131,7 +131,6 @@ func TestBlocksStoreBalancedSet_GetClientsFor_Exclude(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index 41cf8201634..8041122024e 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -637,8 +637,6 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( // Concurrently fetch series from all clients. for c, blockIDs := range clients { // Change variables scope since it will be used in a goroutine. - c := c - blockIDs := blockIDs g.Go(func() error { // See: https://github.com/prometheus/prometheus/pull/8050 @@ -860,8 +858,6 @@ func (q *blocksStoreQuerier) fetchLabelNamesFromStore( // Concurrently fetch series from all clients. for c, blockIDs := range clients { // Change variables scope since it will be used in a goroutine. - c := c - blockIDs := blockIDs g.Go(func() error { req, err := createLabelNamesRequest(minT, maxT, limit, blockIDs, matchers) @@ -967,8 +963,6 @@ func (q *blocksStoreQuerier) fetchLabelValuesFromStore( // Concurrently fetch series from all clients. 
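The `maps.Copy(dst, src)` calls that replace the hand-written copy loops above come from the standard `maps` package (Go 1.21); `maps.Copy` inserts every key/value pair of src into dst, overwriting existing keys, which matches the deleted loops exactly. A small sketch under those assumptions:

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	dst := map[string]int{"tenant-a": 1, "tenant-b": 2}
	src := map[string]int{"tenant-b": 20, "tenant-c": 30}

	// Equivalent to the removed loop:
	//   for k, v := range src { dst[k] = v }
	maps.Copy(dst, src)

	fmt.Println(dst) // map[tenant-a:1 tenant-b:20 tenant-c:30]
}
```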
for c, blockIDs := range clients { // Change variables scope since it will be used in a goroutine. - c := c - blockIDs := blockIDs g.Go(func() error { req, err := createLabelValuesRequest(minT, maxT, limit, name, blockIDs, matchers...) diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index 51001f1346d..70581a7abfb 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -90,7 +90,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { tests := map[string]struct { finderResult bucketindex.Blocks finderErr error - storeSetResponses []interface{} + storeSetResponses []any limits BlocksStoreLimits queryLimiter *limiter.QueryLimiter seriesLimit int @@ -115,7 +115,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ errors.New("no client found"), }, limits: &blocksStoreLimitsMock{}, @@ -127,7 +127,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), @@ -152,7 +152,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( @@ -184,7 +184,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( @@ -216,7 +216,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), @@ -247,7 +247,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( @@ -291,7 +291,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( @@ -335,7 +335,7 @@ func 
TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), @@ -364,7 +364,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( @@ -405,7 +405,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( @@ -446,7 +446,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), @@ -475,7 +475,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( @@ -517,7 +517,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( @@ -559,7 +559,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), @@ -629,7 +629,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), @@ -663,7 +663,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - 
storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( @@ -729,7 +729,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( @@ -795,7 +795,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ // First attempt returns a client whose response does not include all expected blocks. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ @@ -817,7 +817,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block3}, &bucketindex.Block{ID: block4}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ // First attempt returns a client whose response does not include all expected blocks. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ @@ -843,7 +843,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block3}, &bucketindex.Block{ID: block4}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ // First attempt returns a client whose response does not include all expected blocks. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ @@ -922,7 +922,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), @@ -947,7 +947,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), @@ -964,7 +964,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, []cortexpb.Histogram{ @@ -984,7 +984,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: 
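The long run of `storeSetResponses []interface{}` to `[]any` rewrites in this test file is again only a spelling change; the slice still holds heterogeneous scripted results (an error or a client-to-blocks map) that the mock distinguishes at runtime. A generic sketch of that scripted-responses pattern, not the actual mock code:

```go
package main

import (
	"errors"
	"fmt"
)

// nextResponse consumes one mocked response, which may be either an error
// or a ready-made result map, mirroring the []any scripted-responses idea.
func nextResponse(responses []any, pos int) (map[string][]string, error) {
	switch r := responses[pos].(type) {
	case error:
		return nil, r
	case map[string][]string:
		return r, nil
	default:
		return nil, fmt.Errorf("unexpected mocked response type %T", r)
	}
}

func main() {
	responses := []any{
		errors.New("no client found"),
		map[string][]string{"1.1.1.1": {"block-1", "block-2"}},
	}

	for i := range responses {
		res, err := nextResponse(responses, i)
		fmt.Println(res, err)
	}
}
```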
block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, nil, []cortexpb.Histogram{ @@ -1004,7 +1004,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), @@ -1021,7 +1021,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, []cortexpb.Histogram{ @@ -1041,7 +1041,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, nil, []cortexpb.Histogram{ @@ -1063,7 +1063,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block3}, &bucketindex.Block{ID: block4}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ // First attempt returns a client whose response does not include all expected blocks. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ @@ -1101,7 +1101,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block3}, &bucketindex.Block{ID: block4}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ // First attempt returns a client whose response does not include all expected blocks. 
map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ @@ -1137,7 +1137,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), @@ -1155,7 +1155,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, @@ -1181,7 +1181,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, nil, @@ -1207,7 +1207,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), @@ -1224,7 +1224,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, []cortexpb.Histogram{ @@ -1243,7 +1243,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, nil, []cortexpb.Histogram{ @@ -1262,7 +1262,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, 
series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), @@ -1279,7 +1279,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, []cortexpb.Histogram{ @@ -1298,7 +1298,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, nil, []cortexpb.Histogram{ @@ -1316,7 +1316,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { finderResult: bucketindex.Blocks{ &bucketindex.Block{ID: block1}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", @@ -1345,7 +1345,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { finderResult: bucketindex.Blocks{ &bucketindex.Block{ID: block1}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", @@ -1374,7 +1374,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { finderResult: bucketindex.Blocks{ &bucketindex.Block{ID: block1}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", @@ -1404,7 +1404,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, }, expectedErr: validation.AccessDeniedError("PermissionDenied"), - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", @@ -1441,7 +1441,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { finderResult: bucketindex.Blocks{ &bucketindex.Block{ID: block1}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", @@ -1473,7 +1473,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { finderResult: bucketindex.Blocks{ &bucketindex.Block{ID: block1}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", @@ -1502,7 +1502,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { finderResult: bucketindex.Blocks{ &bucketindex.Block{ID: block1}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", @@ -1523,7 +1523,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { finderResult: bucketindex.Blocks{ &bucketindex.Block{ID: block1}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", @@ -1551,7 +1551,6 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { } for testName, testData := range tests { - 
testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() @@ -1655,7 +1654,7 @@ func TestOverrideBlockDiscovery(t *testing.T) { minT := int64(10) maxT := int64(20) - stores := &blocksStoreSetMock{mockedResponses: []interface{}{ + stores := &blocksStoreSetMock{mockedResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockHintsResponse(block1), @@ -1720,7 +1719,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { finderResult bucketindex.Blocks finderErr error limit int - storeSetResponses []interface{} + storeSetResponses []any expectedLabelNames []string expectedLabelValues []string // For __name__ expectedErr string @@ -1739,7 +1738,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ errors.New("no client found"), }, expectedErr: "no client found", @@ -1749,7 +1748,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", @@ -1774,7 +1773,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", @@ -1812,7 +1811,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", @@ -1853,7 +1852,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { // Block1 has series1 and series2 // Block2 has only series1 // Block3 has only series2 - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", @@ -1937,7 +1936,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { // Block1 has series1 and series2 // Block2 has only series1 // Block3 has only series2 - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", @@ -1988,7 +1987,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ // First attempt returns a client whose response does not include all expected blocks. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ @@ -2017,7 +2016,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { &bucketindex.Block{ID: block3}, &bucketindex.Block{ID: block4}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ // First attempt returns a client whose response does not include all expected blocks. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ @@ -2063,7 +2062,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { &bucketindex.Block{ID: block3}, &bucketindex.Block{ID: block4}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ // First attempt returns a client whose response does not include all expected blocks. 
map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ @@ -2162,7 +2161,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { finderResult: bucketindex.Blocks{ &bucketindex.Block{ID: block1}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", @@ -2196,7 +2195,6 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { } for testName, testData := range tests { - testData := testData var hints *storage.LabelHints if testData.limit > 0 { hints = &storage.LabelHints{ @@ -2316,7 +2314,6 @@ func TestBlocksStoreQuerier_SelectSortedShouldHonorQueryStoreAfter(t *testing.T) } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() @@ -2441,7 +2438,7 @@ func TestBlocksStoreQuerier_PromQLExecution(t *testing.T) { stores := &blocksStoreSetMock{ Service: services.NewIdleService(nil, nil), - mockedResponses: []interface{}{ + mockedResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ gateway1: {block1}, gateway2: {block2}, @@ -2482,7 +2479,6 @@ func TestBlocksStoreQuerier_PromQLExecution(t *testing.T) { require.Equal(t, f.T, int64(f.F)) } for i, h := range m.Histograms { - h := h // Check sample timestamp is expected. require.Equal(t, h.T, int64(from)+int64(i)*15000) expectedH := tsdbutil.GenerateTestGaugeFloatHistogram(h.T) @@ -2518,7 +2514,7 @@ func TestBlocksStoreQuerier_ShouldRetryResourceBasedThrottlingError(t *testing.T type blocksStoreSetMock struct { services.Service - mockedResponses []interface{} + mockedResponses []any nextResult int queriedBlocks []ulid.ULID } diff --git a/pkg/querier/blocks_store_replicated_set.go b/pkg/querier/blocks_store_replicated_set.go index 3305db3b47f..d102a522e24 100644 --- a/pkg/querier/blocks_store_replicated_set.go +++ b/pkg/querier/blocks_store_replicated_set.go @@ -5,6 +5,7 @@ import ( "fmt" "math" "math/rand" + "slices" "github.com/go-kit/log" "github.com/oklog/ulid/v2" @@ -179,7 +180,7 @@ func getNonExcludedInstance(set ring.ReplicationSet, exclude []string, balancing } } for _, instance := range set.Instances { - if util.StringsContain(exclude, instance.Addr) { + if slices.Contains(exclude, instance.Addr) { continue } // If zone awareness is not enabled, pick first non-excluded instance. diff --git a/pkg/querier/blocks_store_replicated_set_test.go b/pkg/querier/blocks_store_replicated_set_test.go index 62bf96270c6..6ccc65d4a82 100644 --- a/pkg/querier/blocks_store_replicated_set_test.go +++ b/pkg/querier/blocks_store_replicated_set_test.go @@ -555,7 +555,6 @@ func TestBlocksStoreReplicationSet_GetClientsFor(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() @@ -566,7 +565,7 @@ func TestBlocksStoreReplicationSet_GetClientsFor(t *testing.T) { ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) t.Cleanup(func() { assert.NoError(t, closer.Close()) }) - require.NoError(t, ringStore.CAS(ctx, "test", func(in interface{}) (interface{}, bool, error) { + require.NoError(t, ringStore.CAS(ctx, "test", func(in any) (any, bool, error) { d := ring.NewDesc() testData.setup(d) return d, true, nil @@ -591,7 +590,7 @@ func TestBlocksStoreReplicationSet_GetClientsFor(t *testing.T) { defer services.StopAndAwaitTerminated(ctx, s) //nolint:errcheck // Wait until the ring client has initialised the state. 
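`getNonExcludedInstance` above, like the earlier balanced-set counterpart, swaps the project helper `util.StringsContain` for `slices.Contains` from the standard `slices` package (Go 1.21), which reports whether a slice contains a given element. A minimal sketch mirroring the shape visible in the diff:

```go
package main

import (
	"fmt"
	"slices"
)

// firstNonExcluded returns the first address not in the exclude list,
// or "" if every address is excluded (illustrative helper only).
func firstNonExcluded(addresses, exclude []string) string {
	for _, addr := range addresses {
		if !slices.Contains(exclude, addr) {
			return addr
		}
	}
	return ""
}

func main() {
	fmt.Println(firstNonExcluded(
		[]string{"10.0.0.1", "10.0.0.2", "10.0.0.3"},
		[]string{"10.0.0.1"},
	)) // 10.0.0.2
}
```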
- test.Poll(t, time.Second, true, func() interface{} { + test.Poll(t, time.Second, true, func() any { all, err := r.GetAllHealthy(ring.Read) return err == nil && len(all.Instances) > 0 }) @@ -629,7 +628,7 @@ func TestBlocksStoreReplicationSet_GetClientsFor_ShouldSupportRandomLoadBalancin ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) t.Cleanup(func() { assert.NoError(t, closer.Close()) }) - require.NoError(t, ringStore.CAS(ctx, "test", func(in interface{}) (interface{}, bool, error) { + require.NoError(t, ringStore.CAS(ctx, "test", func(in any) (any, bool, error) { d := ring.NewDesc() for n := 1; n <= numInstances; n++ { d.AddIngester(fmt.Sprintf("instance-%d", n), fmt.Sprintf("127.0.0.%d", n), "", []uint32{uint32(n)}, ring.ACTIVE, registeredAt) @@ -653,7 +652,7 @@ func TestBlocksStoreReplicationSet_GetClientsFor_ShouldSupportRandomLoadBalancin defer services.StopAndAwaitTerminated(ctx, s) //nolint:errcheck // Wait until the ring client has initialised the state. - test.Poll(t, time.Second, true, func() interface{} { + test.Poll(t, time.Second, true, func() any { all, err := r.GetAllHealthy(ring.Read) return err == nil && len(all.Instances) > 0 }) @@ -662,7 +661,7 @@ func TestBlocksStoreReplicationSet_GetClientsFor_ShouldSupportRandomLoadBalancin // requests across store-gateways is balanced. distribution := map[string]int{} - for n := 0; n < numRuns; n++ { + for range numRuns { clients, err := s.GetClientsFor(userID, []ulid.ULID{block1}, nil, nil) require.NoError(t, err) require.Len(t, clients, 1) @@ -697,7 +696,7 @@ func TestBlocksStoreReplicationSet_GetClientsFor_ZoneAwareness(t *testing.T) { ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) t.Cleanup(func() { assert.NoError(t, closer.Close()) }) - require.NoError(t, ringStore.CAS(ctx, "test", func(in interface{}) (interface{}, bool, error) { + require.NoError(t, ringStore.CAS(ctx, "test", func(in any) (any, bool, error) { d := ring.NewDesc() for n := 1; n <= numInstances; n++ { zone := strconv.Itoa((n-1)%3 + 1) @@ -722,14 +721,14 @@ func TestBlocksStoreReplicationSet_GetClientsFor_ZoneAwareness(t *testing.T) { defer services.StopAndAwaitTerminated(ctx, s) //nolint:errcheck // Wait until the ring client has initialised the state. - test.Poll(t, time.Second, true, func() interface{} { + test.Poll(t, time.Second, true, func() any { all, err := r.GetAllHealthy(ring.Read) return err == nil && len(all.Instances) > 0 }) // Target hit shouldn't exist in the blocksMap. targets := [3]int{3, 2, 1} - for i := 0; i < numRuns; i++ { + for i := range numRuns { blocksMap := [3]map[string]int{ {"1": 1, "2": 1}, {"1": 1, "3": 1}, diff --git a/pkg/querier/chunk_store_queryable_test.go b/pkg/querier/chunk_store_queryable_test.go index 1ecbb438d6d..9c6c84363a9 100644 --- a/pkg/querier/chunk_store_queryable_test.go +++ b/pkg/querier/chunk_store_queryable_test.go @@ -27,7 +27,7 @@ func makeMockChunks(t require.TestingT, numChunks int, enc encoding.Encoding, fr var ( chunks = make([]chunk.Chunk, 0, numChunks) ) - for i := 0; i < numChunks; i++ { + for range numChunks { c := util.GenerateChunk(t, sampleRate, from, int(samplesPerChunk), enc, additionalLabels...) 
chunks = append(chunks, c) from = from.Add(chunkOffset) diff --git a/pkg/querier/codec/protobuf_codec.go b/pkg/querier/codec/protobuf_codec.go index 733e61c79bd..8a6526f7db3 100644 --- a/pkg/querier/codec/protobuf_codec.go +++ b/pkg/querier/codec/protobuf_codec.go @@ -100,7 +100,7 @@ func getMatrixSampleStreams(data *v1.QueryData) *[]tripperware.SampleStream { sampleStreamsLen := len(data.Result.(promql.Matrix)) sampleStreams := make([]tripperware.SampleStream, sampleStreamsLen) - for i := 0; i < sampleStreamsLen; i++ { + for i := range sampleStreamsLen { sampleStream := data.Result.(promql.Matrix)[i] labelsLen := sampleStream.Metric.Len() var lbls []cortexpb.LabelAdapter @@ -120,7 +120,7 @@ func getMatrixSampleStreams(data *v1.QueryData) *[]tripperware.SampleStream { var samples []cortexpb.Sample if samplesLen > 0 { samples = make([]cortexpb.Sample, samplesLen) - for j := 0; j < samplesLen; j++ { + for j := range samplesLen { samples[j] = cortexpb.Sample{ Value: sampleStream.Floats[j].F, TimestampMs: sampleStream.Floats[j].T, @@ -132,7 +132,7 @@ func getMatrixSampleStreams(data *v1.QueryData) *[]tripperware.SampleStream { var histograms []tripperware.SampleHistogramPair if histogramsLen > 0 { histograms = make([]tripperware.SampleHistogramPair, histogramsLen) - for j := 0; j < histogramsLen; j++ { + for j := range histogramsLen { bucketsLen := len(sampleStream.Histograms[j].H.NegativeBuckets) + len(sampleStream.Histograms[j].H.PositiveBuckets) if sampleStream.Histograms[j].H.ZeroCount > 0 { bucketsLen = len(sampleStream.Histograms[j].H.NegativeBuckets) + len(sampleStream.Histograms[j].H.PositiveBuckets) + 1 @@ -157,7 +157,7 @@ func getVectorSamples(data *v1.QueryData, cortexInternal bool) *[]tripperware.Sa vectorSamplesLen := len(data.Result.(promql.Vector)) vectorSamples := make([]tripperware.Sample, vectorSamplesLen) - for i := 0; i < vectorSamplesLen; i++ { + for i := range vectorSamplesLen { sample := data.Result.(promql.Vector)[i] labelsLen := sample.Metric.Len() var lbls []cortexpb.LabelAdapter @@ -243,7 +243,7 @@ func getBuckets(bucketsLen int, it histogram.BucketIterator[float64]) []*tripper func getStats(builtin *stats.BuiltinStats) *tripperware.PrometheusResponseSamplesStats { queryableSamplesStatsPerStepLen := len(builtin.Samples.TotalQueryableSamplesPerStep) queryableSamplesStatsPerStep := make([]*tripperware.PrometheusResponseQueryableSamplesStatsPerStep, queryableSamplesStatsPerStepLen) - for i := 0; i < queryableSamplesStatsPerStepLen; i++ { + for i := range queryableSamplesStatsPerStepLen { queryableSamplesStatsPerStep[i] = &tripperware.PrometheusResponseQueryableSamplesStatsPerStep{ Value: builtin.Samples.TotalQueryableSamplesPerStep[i].V, TimestampMs: builtin.Samples.TotalQueryableSamplesPerStep[i].T, diff --git a/pkg/querier/distributor_queryable_test.go b/pkg/querier/distributor_queryable_test.go index d7313bdf396..825da08860f 100644 --- a/pkg/querier/distributor_queryable_test.go +++ b/pkg/querier/distributor_queryable_test.go @@ -82,7 +82,6 @@ func TestDistributorQuerier_SelectShouldHonorQueryIngestersWithin(t *testing.T) for _, streamingMetadataEnabled := range []bool{false, true} { for testName, testData := range tests { - testData := testData t.Run(fmt.Sprintf("%s (streaming metadata enabled: %t)", testName, streamingMetadataEnabled), func(t *testing.T) { t.Parallel() diff --git a/pkg/querier/parquet_queryable_test.go b/pkg/querier/parquet_queryable_test.go index b8729715d41..e842a69dda8 100644 --- a/pkg/querier/parquet_queryable_test.go +++ 
b/pkg/querier/parquet_queryable_test.go @@ -50,7 +50,7 @@ func TestParquetQueryableFallbackLogic(t *testing.T) { maxT := util.TimeToMillis(time.Now()) createStore := func() *blocksStoreSetMock { - return &blocksStoreSetMock{mockedResponses: []interface{}{ + return &blocksStoreSetMock{mockedResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ @@ -415,7 +415,7 @@ func TestParquetQueryable_Limits(t *testing.T) { ctx := context.Background() seriesCount := 100 lbls := make([]labels.Labels, seriesCount) - for i := 0; i < seriesCount; i++ { + for i := range seriesCount { lbls[i] = labels.FromStrings(labels.MetricName, metricName, "series", strconv.Itoa(i)) } @@ -514,7 +514,6 @@ func TestParquetQueryable_Limits(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() @@ -712,13 +711,13 @@ func TestMaterializedLabelsFilterCallbackConcurrent(t *testing.T) { By: true, Labels: []string{"__name__"}, } - for i := 0; i < 10; i++ { + for range 10 { go func() { defer wg.Done() ctx := injectShardInfoIntoContext(context.Background(), si) filter, exists := materializedLabelsFilterCallback(ctx, nil) require.Equal(t, true, exists) - for j := 0; j < 1000; j++ { + for j := range 1000 { filter.Filter(labels.FromStrings("__name__", "test_metric", "label_1", strconv.Itoa(j))) } filter.Close() @@ -734,7 +733,7 @@ func TestParquetQueryableFallbackDisabled(t *testing.T) { maxT := util.TimeToMillis(time.Now()) createStore := func() *blocksStoreSetMock { - return &blocksStoreSetMock{mockedResponses: []interface{}{ + return &blocksStoreSetMock{mockedResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 9160f1c4112..a9652188f5d 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -499,7 +499,6 @@ func (q querier) LabelValues(ctx context.Context, name string, hints *storage.La for _, querier := range queriers { // Need to reassign as the original variable will change and can't be relied on in a goroutine. - querier := querier g.Go(func() error { // NB: Values are sorted in Cortex already. myValues, myWarnings, err := querier.LabelValues(ctx, name, hints, matchers...) @@ -568,7 +567,6 @@ func (q querier) LabelNames(ctx context.Context, hints *storage.LabelHints, matc for _, querier := range queriers { // Need to reassign as the original variable will change and can't be relied on in a goroutine. - querier := querier g.Go(func() error { // NB: Names are sorted in Cortex already. myNames, myWarnings, err := querier.LabelNames(ctx, hints, matchers...) diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 3c48c0ab7d5..cf7855eafa3 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -51,11 +51,11 @@ const ( type wrappedQuerier struct { storage.Querier - selectCallsArgs [][]interface{} + selectCallsArgs [][]any } func (q *wrappedQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - q.selectCallsArgs = append(q.selectCallsArgs, []interface{}{sortSeries, hints, matchers}) + q.selectCallsArgs = append(q.selectCallsArgs, []any{sortSeries, hints, matchers}) return q.Querier.Select(ctx, sortSeries, hints, matchers...) 
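`wrappedQuerier` above records each Select call's arguments in a `[][]any`, and the stats changes just below pass alternating key/value pairs through `...any`; both are the same empty-interface spelling update. A compact sketch of that recording/variadic pattern (hypothetical names, not this package's API):

```go
package main

import "fmt"

// callRecorder keeps one []any per invocation, mirroring the
// selectCallsArgs [][]any field used by the test wrapper.
type callRecorder struct {
	calls [][]any
}

// record accepts arbitrary values, in the spirit of AddExtraFields(...any).
func (r *callRecorder) record(args ...any) {
	r.calls = append(r.calls, args)
}

func main() {
	r := &callRecorder{}
	r.record("sortSeries", true)
	r.record("limit", 100, "matchers", []string{`__name__="up"`})

	for i, call := range r.calls {
		fmt.Printf("call %d: %v\n", i, call)
	}
}
```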
} @@ -325,7 +325,6 @@ func TestShouldSortSeriesIfQueryingMultipleQueryables(t *testing.T) { for _, tc := range tCases { for _, thanosEngine := range []bool{false, true} { - thanosEngine := thanosEngine t.Run(tc.name+fmt.Sprintf("thanos engine: %t, encoding=%s", thanosEngine, enc.String()), func(t *testing.T) { wDistributorQueriable := &wrappedSampleAndChunkQueryable{QueryableWithFilter: tc.distributorQueryable} var wQueriables []QueryableWithFilter @@ -1670,7 +1669,6 @@ func TestConfig_Validate(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() cfg := &Config{} diff --git a/pkg/querier/stats/stats.go b/pkg/querier/stats/stats.go index 127c422878a..a834cd311e1 100644 --- a/pkg/querier/stats/stats.go +++ b/pkg/querier/stats/stats.go @@ -101,7 +101,7 @@ func (s *QueryStats) AddFetchedSeries(series uint64) { atomic.AddUint64(&s.FetchedSeriesCount, series) } -func (s *QueryStats) AddExtraFields(fieldsVals ...interface{}) { +func (s *QueryStats) AddExtraFields(fieldsVals ...any) { if s == nil { return } @@ -124,15 +124,15 @@ func (s *QueryStats) AddExtraFields(fieldsVals ...interface{}) { } } -func (s *QueryStats) LoadExtraFields() []interface{} { +func (s *QueryStats) LoadExtraFields() []any { if s == nil { - return []interface{}{} + return []any{} } s.m.Lock() defer s.m.Unlock() - r := make([]interface{}, 0, len(s.ExtraFields)) + r := make([]any, 0, len(s.ExtraFields)) for k, v := range s.ExtraFields { r = append(r, k, v) } diff --git a/pkg/querier/stats/stats_test.go b/pkg/querier/stats/stats_test.go index 5f2e850aefc..7908d06773d 100644 --- a/pkg/querier/stats/stats_test.go +++ b/pkg/querier/stats/stats_test.go @@ -70,14 +70,14 @@ func TestQueryStats_AddExtraFields(t *testing.T) { stats.AddExtraFields("a", "b") stats.AddExtraFields("c") - checkExtraFields(t, []interface{}{"a", "b", "c", ""}, stats.LoadExtraFields()) + checkExtraFields(t, []any{"a", "b", "c", ""}, stats.LoadExtraFields()) }) t.Run("add and load extra fields nil receiver", func(t *testing.T) { var stats *QueryStats stats.AddExtraFields("a", "b") - checkExtraFields(t, []interface{}{}, stats.LoadExtraFields()) + checkExtraFields(t, []any{}, stats.LoadExtraFields()) }) } @@ -251,7 +251,7 @@ func TestStats_Merge(t *testing.T) { assert.Equal(t, uint64(105), stats1.LoadPeakSamples()) assert.Equal(t, uint64(401), stats1.LoadStoreGatewayTouchedPostings()) assert.Equal(t, uint64(601), stats1.LoadStoreGatewayTouchedPostingBytes()) - checkExtraFields(t, []interface{}{"a", "b", "c", "d"}, stats1.LoadExtraFields()) + checkExtraFields(t, []any{"a", "b", "c", "d"}, stats1.LoadExtraFields()) }) t.Run("merge two nil stats objects", func(t *testing.T) { @@ -265,11 +265,11 @@ func TestStats_Merge(t *testing.T) { assert.Equal(t, uint64(0), stats1.LoadFetchedSeries()) assert.Equal(t, uint64(0), stats1.LoadFetchedChunkBytes()) assert.Equal(t, uint64(0), stats1.LoadFetchedDataBytes()) - checkExtraFields(t, []interface{}{}, stats1.LoadExtraFields()) + checkExtraFields(t, []any{}, stats1.LoadExtraFields()) }) } -func checkExtraFields(t *testing.T, expected, actual []interface{}) { +func checkExtraFields(t *testing.T, expected, actual []any) { t.Parallel() assert.Equal(t, len(expected), len(actual)) expectedMap := map[string]string{} diff --git a/pkg/querier/store_gateway_client_test.go b/pkg/querier/store_gateway_client_test.go index 74d6c6f7df7..34f74528170 100644 --- a/pkg/querier/store_gateway_client_test.go +++ b/pkg/querier/store_gateway_client_test.go @@ -42,7 +42,7 
@@ func Test_newStoreGatewayClientFactory(t *testing.T) { reg := prometheus.NewPedanticRegistry() factory := newStoreGatewayClientFactory(cfg, reg) - for i := 0; i < 2; i++ { + for range 2 { client, err := factory(listener.Addr().String()) require.NoError(t, err) defer client.Close() //nolint:errcheck diff --git a/pkg/querier/tenantfederation/exemplar_merge_queryable.go b/pkg/querier/tenantfederation/exemplar_merge_queryable.go index c6b24caeb03..33e16ba276a 100644 --- a/pkg/querier/tenantfederation/exemplar_merge_queryable.go +++ b/pkg/querier/tenantfederation/exemplar_merge_queryable.go @@ -144,7 +144,7 @@ func (m mergeExemplarQuerier) Select(start, end int64, matchers ...[]*labels.Mat // filter out tenants to query and unrelated matchers allMatchedTenantIds, allUnrelatedMatchers := filterAllTenantsAndMatchers(m.idLabelName, m.tenantIds, matchers) - jobs := make([]interface{}, len(allMatchedTenantIds)) + jobs := make([]any, len(allMatchedTenantIds)) results := make([][]exemplar.QueryResult, len(allMatchedTenantIds)) var jobPos int @@ -162,7 +162,7 @@ func (m mergeExemplarQuerier) Select(start, end int64, matchers ...[]*labels.Mat jobPos++ } - run := func(ctx context.Context, jobIntf interface{}) error { + run := func(ctx context.Context, jobIntf any) error { job, ok := jobIntf.(*exemplarSelectJob) if !ok { return fmt.Errorf("unexpected type %T", jobIntf) diff --git a/pkg/querier/tenantfederation/exemplar_merge_queryable_test.go b/pkg/querier/tenantfederation/exemplar_merge_queryable_test.go index bb48fc0f299..b52bf1b0828 100644 --- a/pkg/querier/tenantfederation/exemplar_merge_queryable_test.go +++ b/pkg/querier/tenantfederation/exemplar_merge_queryable_test.go @@ -342,7 +342,7 @@ func Test_MergeExemplarQuerier_Select_WhenUseRegexResolver(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), regexResolver)) // wait update knownUsers - test.Poll(t, time.Second*10, true, func() interface{} { + test.Poll(t, time.Second*10, true, func() any { return testutil.ToFloat64(regexResolver.lastUpdateUserRun) > 0 && testutil.ToFloat64(regexResolver.discoveredUsers) == 2 }) diff --git a/pkg/querier/tenantfederation/merge_queryable.go b/pkg/querier/tenantfederation/merge_queryable.go index 58cdb7625f2..3a69a6cc8da 100644 --- a/pkg/querier/tenantfederation/merge_queryable.go +++ b/pkg/querier/tenantfederation/merge_queryable.go @@ -244,7 +244,7 @@ type stringSliceFuncJob struct { // It doesn't require the output of the stringSliceFunc to be sorted, as results // of LabelValues are not sorted. func (m *mergeQuerier) mergeDistinctStringSliceWithTenants(ctx context.Context, f stringSliceFunc, tenants map[string]struct{}, ids []string, queriers []storage.Querier) ([]string, annotations.Annotations, error) { - var jobs []interface{} + var jobs []any for pos, id := range ids { if tenants != nil { @@ -260,7 +260,7 @@ func (m *mergeQuerier) mergeDistinctStringSliceWithTenants(ctx context.Context, } parentCtx := ctx - run := func(ctx context.Context, jobIntf interface{}) error { + run := func(ctx context.Context, jobIntf any) error { job, ok := jobIntf.(*stringSliceFuncJob) if !ok { return fmt.Errorf("unexpected type %T", jobIntf) @@ -339,7 +339,7 @@ func (m *mergeQuerier) Select(ctx context.Context, sortSeries bool, hints *stora log, ctx := spanlogger.New(ctx, "mergeQuerier.Select") defer log.Finish() matchedValues, filteredMatchers := filterValuesByMatchers(m.idLabelName, ids, matchers...) 
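The tenant-federation queriers above hand typed jobs to a concurrency helper through an `any` parameter and recover them with a comma-ok type assertion; only the spelling of the empty interface changes here. A self-contained sketch of that hand-off (the job type and runner are illustrative, not the package's real ones):

```go
package main

import "fmt"

type selectJob struct {
	tenantID string
}

// run mimics a worker callback that receives its job as any and must
// recover the concrete type with a comma-ok assertion before using it.
func run(jobIntf any) error {
	job, ok := jobIntf.(*selectJob)
	if !ok {
		return fmt.Errorf("unexpected type %T", jobIntf)
	}
	fmt.Println("querying tenant", job.tenantID)
	return nil
}

func main() {
	jobs := []any{&selectJob{tenantID: "team-a"}, &selectJob{tenantID: "team-b"}}
	for _, j := range jobs {
		if err := run(j); err != nil {
			fmt.Println(err)
		}
	}
}
```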
- var jobs = make([]interface{}, len(matchedValues)) + var jobs = make([]any, len(matchedValues)) var seriesSets = make([]storage.SeriesSet, len(matchedValues)) var jobPos int for labelPos := range ids { @@ -355,7 +355,7 @@ func (m *mergeQuerier) Select(ctx context.Context, sortSeries bool, hints *stora } parentCtx := ctx - run := func(ctx context.Context, jobIntf interface{}) error { + run := func(ctx context.Context, jobIntf any) error { job, ok := jobIntf.(*selectJob) if !ok { return fmt.Errorf("unexpected type %T", jobIntf) diff --git a/pkg/querier/tenantfederation/merge_queryable_test.go b/pkg/querier/tenantfederation/merge_queryable_test.go index 5be2f70a764..df1ed124683 100644 --- a/pkg/querier/tenantfederation/merge_queryable_test.go +++ b/pkg/querier/tenantfederation/merge_queryable_test.go @@ -654,11 +654,9 @@ func TestMergeQueryable_Select(t *testing.T) { }}, }, } { - scenario := scenario t.Run(scenario.name, func(t *testing.T) { for _, useRegexResolver := range []bool{true, false} { for _, tc := range scenario.selectTestCases { - tc := tc t.Run(fmt.Sprintf("%s, useRegexResolver: %v", tc.name, useRegexResolver), func(t *testing.T) { ctx := context.Background() if useRegexResolver { @@ -686,7 +684,7 @@ func TestMergeQueryable_Select(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), regexResolver)) // wait update knownUsers - test.Poll(t, time.Second*10, true, func() interface{} { + test.Poll(t, time.Second*10, true, func() any { return testutil.ToFloat64(regexResolver.lastUpdateUserRun) > 0 && testutil.ToFloat64(regexResolver.discoveredUsers) == float64(len(scenario.tenants)) }) @@ -857,7 +855,6 @@ func TestMergeQueryable_LabelNames(t *testing.T) { }, }, } { - scenario := scenario for _, useRegexResolver := range []bool{true, false} { t.Run(fmt.Sprintf("%s, useRegexResolver: %v", scenario.mergeQueryableScenario.name, useRegexResolver), func(t *testing.T) { ctx := context.Background() @@ -885,7 +882,7 @@ func TestMergeQueryable_LabelNames(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), regexResolver)) // wait update knownUsers - test.Poll(t, time.Second*10, true, func() interface{} { + test.Poll(t, time.Second*10, true, func() any { return testutil.ToFloat64(regexResolver.lastUpdateUserRun) > 0 && testutil.ToFloat64(regexResolver.discoveredUsers) == float64(len(scenario.tenants)) }) @@ -1093,7 +1090,6 @@ func TestMergeQueryable_LabelValues(t *testing.T) { }}, }, } { - scenario := scenario t.Run(scenario.name, func(t *testing.T) { for _, useRegexResolver := range []bool{true, false} { for _, tc := range scenario.labelValuesTestCases { @@ -1123,7 +1119,7 @@ func TestMergeQueryable_LabelValues(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), regexResolver)) // wait update knownUsers - test.Poll(t, time.Second*10, true, func() interface{} { + test.Poll(t, time.Second*10, true, func() any { return testutil.ToFloat64(regexResolver.lastUpdateUserRun) > 0 && testutil.ToFloat64(regexResolver.discoveredUsers) == float64(len(scenario.tenants)) }) @@ -1263,7 +1259,7 @@ func containsTags(span *mocktracer.MockSpan, expectedTag expectedTag) bool { type spanWithTags struct { name string - tags map[string]interface{} + tags map[string]any } type expectedTag struct { diff --git a/pkg/querier/tenantfederation/metadata_merge_querier.go b/pkg/querier/tenantfederation/metadata_merge_querier.go index 7f796c2b39d..37e5a63f5e1 100644 --- a/pkg/querier/tenantfederation/metadata_merge_querier.go 
+++ b/pkg/querier/tenantfederation/metadata_merge_querier.go @@ -61,7 +61,7 @@ func (m *mergeMetadataQuerier) MetricsMetadata(ctx context.Context, req *client. return m.upstream.MetricsMetadata(ctx, req) } - jobs := make([]interface{}, len(tenantIds)) + jobs := make([]any, len(tenantIds)) results := make([][]scrape.MetricMetadata, len(tenantIds)) var jobPos int @@ -74,7 +74,7 @@ func (m *mergeMetadataQuerier) MetricsMetadata(ctx context.Context, req *client. jobPos++ } - run := func(ctx context.Context, jobIntf interface{}) error { + run := func(ctx context.Context, jobIntf any) error { job, ok := jobIntf.(*metadataSelectJob) if !ok { return fmt.Errorf("unexpected type %T", jobIntf) diff --git a/pkg/querier/tenantfederation/metadata_merge_querier_test.go b/pkg/querier/tenantfederation/metadata_merge_querier_test.go index 95ba8515436..c04e4e3c0b9 100644 --- a/pkg/querier/tenantfederation/metadata_merge_querier_test.go +++ b/pkg/querier/tenantfederation/metadata_merge_querier_test.go @@ -176,7 +176,7 @@ func Test_mergeMetadataQuerier_MetricsMetadata_WhenUseRegexResolver(t *testing.T require.NoError(t, services.StartAndAwaitRunning(context.Background(), regexResolver)) // wait update knownUsers - test.Poll(t, time.Second*10, true, func() interface{} { + test.Poll(t, time.Second*10, true, func() any { return testutil.ToFloat64(regexResolver.lastUpdateUserRun) > 0 && testutil.ToFloat64(regexResolver.discoveredUsers) == 2 }) diff --git a/pkg/querier/tenantfederation/regex_resolver_test.go b/pkg/querier/tenantfederation/regex_resolver_test.go index e178d91d8a5..03735e8b1cd 100644 --- a/pkg/querier/tenantfederation/regex_resolver_test.go +++ b/pkg/querier/tenantfederation/regex_resolver_test.go @@ -96,7 +96,7 @@ func Test_RegexResolver(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), regexResolver)) // wait update knownUsers - test.Poll(t, time.Second*10, true, func() interface{} { + test.Poll(t, time.Second*10, true, func() any { return testutil.ToFloat64(regexResolver.lastUpdateUserRun) > 0 && testutil.ToFloat64(regexResolver.discoveredUsers) == float64(len(tc.existingTenants)) }) diff --git a/pkg/querier/tripperware/distributed_query_test.go b/pkg/querier/tripperware/distributed_query_test.go index 92f22ffd890..17b3dd644eb 100644 --- a/pkg/querier/tripperware/distributed_query_test.go +++ b/pkg/querier/tripperware/distributed_query_test.go @@ -118,7 +118,6 @@ func TestLogicalPlanGeneration(t *testing.T) { } for i, tc := range testCases { - tc := tc t.Run(strconv.Itoa(i)+"_"+tc.name, func(t *testing.T) { t.Parallel() diff --git a/pkg/querier/tripperware/instantquery/instant_query_test.go b/pkg/querier/tripperware/instantquery/instant_query_test.go index 6aa4e797842..2e56eecd685 100644 --- a/pkg/querier/tripperware/instantquery/instant_query_test.go +++ b/pkg/querier/tripperware/instantquery/instant_query_test.go @@ -90,7 +90,6 @@ func TestRequest(t *testing.T) { }, }, } { - tc := tc t.Run(tc.url, func(t *testing.T) { t.Parallel() r, err := http.NewRequest("POST", tc.url, http.NoBody) @@ -434,7 +433,6 @@ func TestResponse(t *testing.T) { }, }, } { - tc := tc t.Run(strconv.Itoa(i), func(t *testing.T) { t.Parallel() var response *http.Response @@ -709,7 +707,6 @@ func TestMergeResponse(t *testing.T) { cancelBeforeMerge: true, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() ctx, cancelCtx := context.WithCancel(user.InjectOrgID(context.Background(), "1")) @@ -1722,7 +1719,6 @@ func TestMergeResponseProtobuf(t *testing.T) { cancelBeforeMerge: 
true, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() ctx, cancelCtx := context.WithCancel(user.InjectOrgID(context.Background(), "1")) @@ -1821,7 +1817,7 @@ func Benchmark_Decode(b *testing.B) { maxSamplesCount := 1000000 samples := make([]tripperware.SampleStream, maxSamplesCount) - for i := 0; i < maxSamplesCount; i++ { + for i := range maxSamplesCount { samples[i].Labels = append(samples[i].Labels, cortexpb.LabelAdapter{Name: fmt.Sprintf("Sample%v", i), Value: fmt.Sprintf("Value%v", i)}) samples[i].Labels = append(samples[i].Labels, cortexpb.LabelAdapter{Name: fmt.Sprintf("Sample2%v", i), Value: fmt.Sprintf("Value2%v", i)}) samples[i].Labels = append(samples[i].Labels, cortexpb.LabelAdapter{Name: fmt.Sprintf("Sample3%v", i), Value: fmt.Sprintf("Value3%v", i)}) @@ -1864,10 +1860,9 @@ func Benchmark_Decode(b *testing.B) { body, err := json.Marshal(r) require.NoError(b, err) - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { response := &http.Response{ StatusCode: 200, Body: io.NopCloser(bytes.NewBuffer(body)), @@ -1885,7 +1880,7 @@ func Benchmark_Decode_Protobuf(b *testing.B) { maxSamplesCount := 1000000 samples := make([]tripperware.SampleStream, maxSamplesCount) - for i := 0; i < maxSamplesCount; i++ { + for i := range maxSamplesCount { samples[i].Labels = append(samples[i].Labels, cortexpb.LabelAdapter{Name: fmt.Sprintf("Sample%v", i), Value: fmt.Sprintf("Value%v", i)}) samples[i].Labels = append(samples[i].Labels, cortexpb.LabelAdapter{Name: fmt.Sprintf("Sample2%v", i), Value: fmt.Sprintf("Value2%v", i)}) samples[i].Labels = append(samples[i].Labels, cortexpb.LabelAdapter{Name: fmt.Sprintf("Sample3%v", i), Value: fmt.Sprintf("Value3%v", i)}) @@ -1928,10 +1923,9 @@ func Benchmark_Decode_Protobuf(b *testing.B) { body, err := proto.Marshal(&r) require.NoError(b, err) - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { response := &http.Response{ StatusCode: 200, Header: http.Header{"Content-Type": []string{"application/x-protobuf"}}, diff --git a/pkg/querier/tripperware/instantquery/limits_test.go b/pkg/querier/tripperware/instantquery/limits_test.go index a365eab414c..155921269cf 100644 --- a/pkg/querier/tripperware/instantquery/limits_test.go +++ b/pkg/querier/tripperware/instantquery/limits_test.go @@ -68,7 +68,6 @@ func TestLimitsMiddleware_MaxQueryLength(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() req := &tripperware.PrometheusRequest{Query: testData.query} diff --git a/pkg/querier/tripperware/merge.go b/pkg/querier/tripperware/merge.go index 0e3d8aabb4b..3ebf099f67b 100644 --- a/pkg/querier/tripperware/merge.go +++ b/pkg/querier/tripperware/merge.go @@ -3,6 +3,7 @@ package tripperware import ( "context" "fmt" + "slices" "sort" "github.com/prometheus/common/model" @@ -247,7 +248,7 @@ func statsMerge(shouldSumStats bool, resps []*PrometheusResponse) *PrometheusRes keys = append(keys, key) } - sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] }) + slices.Sort(keys) result := &PrometheusResponseStats{Samples: &PrometheusResponseSamplesStats{}} for _, key := range keys { diff --git a/pkg/querier/tripperware/merge_test.go b/pkg/querier/tripperware/merge_test.go index 7ee5b0cbbd4..705f75d2c3f 100644 --- a/pkg/querier/tripperware/merge_test.go +++ b/pkg/querier/tripperware/merge_test.go @@ -360,7 +360,6 @@ func TestMergeSampleStreams(t *testing.T) { }, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { 
t.Parallel() output := make(map[string]SampleStream) @@ -440,7 +439,6 @@ func TestSliceSamples(t *testing.T) { }, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() actual := sliceSamples(tc.samples, tc.minTs) @@ -589,7 +587,6 @@ func TestSliceHistograms(t *testing.T) { }, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() actual := sliceHistograms(tc.histograms, tc.minTs) diff --git a/pkg/querier/tripperware/query_attribute_matcher.go b/pkg/querier/tripperware/query_attribute_matcher.go index 002568b7a4e..36e38103952 100644 --- a/pkg/querier/tripperware/query_attribute_matcher.go +++ b/pkg/querier/tripperware/query_attribute_matcher.go @@ -2,6 +2,7 @@ package tripperware import ( "net/http" + "slices" "strings" "time" @@ -159,13 +160,7 @@ func matchAttributeForMetadataQuery(attribute validation.QueryAttribute, op stri if attribute.Regex != "" { matched = true if attribute.Regex != ".*" && attribute.CompiledRegex != nil { - atLeastOneMatched := false - for _, matcher := range r.Form["match[]"] { - if attribute.CompiledRegex.MatchString(matcher) { - atLeastOneMatched = true - break - } - } + atLeastOneMatched := slices.ContainsFunc(r.Form["match[]"], attribute.CompiledRegex.MatchString) if !atLeastOneMatched { return false } diff --git a/pkg/querier/tripperware/query_test.go b/pkg/querier/tripperware/query_test.go index 08f149f43b0..a5d210488d6 100644 --- a/pkg/querier/tripperware/query_test.go +++ b/pkg/querier/tripperware/query_test.go @@ -115,7 +115,7 @@ func TestSampleStreamJSONSerialization(t *testing.T) { } func generateData(timeseries, datapoints int) (floatMatrix, histogramMatrix []*SampleStream) { - for i := 0; i < timeseries; i++ { + for i := range timeseries { lset := labels.FromMap(map[string]string{ model.MetricNameLabel: "timeseries_" + strconv.Itoa(i), "foo": "bar", diff --git a/pkg/querier/tripperware/queryrange/limits_test.go b/pkg/querier/tripperware/queryrange/limits_test.go index 3690e1e0386..31d3008e5cd 100644 --- a/pkg/querier/tripperware/queryrange/limits_test.go +++ b/pkg/querier/tripperware/queryrange/limits_test.go @@ -71,7 +71,6 @@ func TestLimitsMiddleware_MaxQueryLookback(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() req := &tripperware.PrometheusRequest{ @@ -190,7 +189,6 @@ func TestLimitsMiddleware_MaxQueryLength(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() req := &tripperware.PrometheusRequest{ diff --git a/pkg/querier/tripperware/queryrange/marshaling_test.go b/pkg/querier/tripperware/queryrange/marshaling_test.go index d0efd0e8d4d..4661d1b168a 100644 --- a/pkg/querier/tripperware/queryrange/marshaling_test.go +++ b/pkg/querier/tripperware/queryrange/marshaling_test.go @@ -27,10 +27,9 @@ func BenchmarkPrometheusCodec_DecodeResponse_Json(b *testing.B) { require.NoError(b, err) b.Log("test prometheus response size:", len(encodedRes)) - b.ResetTimer() b.ReportAllocs() - for n := 0; n < b.N; n++ { + for b.Loop() { _, err := PrometheusCodec.DecodeResponse(context.Background(), &http.Response{ StatusCode: 200, Header: http.Header{"Content-Type": []string{tripperware.ApplicationJson}}, @@ -53,10 +52,9 @@ func BenchmarkPrometheusCodec_DecodeResponse_Protobuf(b *testing.B) { require.NoError(b, err) b.Log("test prometheus response size:", len(encodedRes)) - b.ResetTimer() b.ReportAllocs() - for n := 0; n < b.N; n++ { + for b.Loop() { _, err := 
PrometheusCodec.DecodeResponse(context.Background(), &http.Response{ StatusCode: 200, Header: http.Header{"Content-Type": []string{tripperware.ApplicationProtobuf}}, @@ -76,10 +74,9 @@ func BenchmarkPrometheusCodec_EncodeResponse(b *testing.B) { // Generate a mocked response and marshal it. res := mockPrometheusResponse(numSeries, numSamplesPerSeries) - b.ResetTimer() b.ReportAllocs() - for n := 0; n < b.N; n++ { + for b.Loop() { _, err := PrometheusCodec.EncodeResponse(context.Background(), nil, res) require.NoError(b, err) } @@ -87,10 +84,10 @@ func BenchmarkPrometheusCodec_EncodeResponse(b *testing.B) { func mockPrometheusResponse(numSeries, numSamplesPerSeries int) *tripperware.PrometheusResponse { stream := make([]tripperware.SampleStream, numSeries) - for s := 0; s < numSeries; s++ { + for s := range numSeries { // Generate random samples. samples := make([]cortexpb.Sample, numSamplesPerSeries) - for i := 0; i < numSamplesPerSeries; i++ { + for i := range numSamplesPerSeries { samples[i] = cortexpb.Sample{ Value: rand.Float64(), TimestampMs: int64(i), diff --git a/pkg/querier/tripperware/queryrange/query_range_test.go b/pkg/querier/tripperware/queryrange/query_range_test.go index 27fba6b1bab..20d69d0fda6 100644 --- a/pkg/querier/tripperware/queryrange/query_range_test.go +++ b/pkg/querier/tripperware/queryrange/query_range_test.go @@ -71,7 +71,6 @@ func TestRequest(t *testing.T) { expectedErr: queryapi.ErrStepTooSmall, }, } { - tc := tc t.Run(tc.url, func(t *testing.T) { t.Parallel() r, err := http.NewRequest("POST", tc.url, http.NoBody) @@ -265,7 +264,6 @@ func TestResponse(t *testing.T) { }, } for i, tc := range testCases { - tc := tc t.Run(strconv.Itoa(i), func(t *testing.T) { t.Parallel() protobuf, err := proto.Marshal(tc.promBody) @@ -398,7 +396,6 @@ func TestResponseWithStats(t *testing.T) { isProtobuf: false, }, } { - tc := tc t.Run(strconv.Itoa(i), func(t *testing.T) { t.Parallel() protobuf, err := proto.Marshal(tc.promBody) @@ -1182,7 +1179,6 @@ func TestMergeAPIResponses(t *testing.T) { }, }, }} { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() ctx, cancelCtx := context.WithCancel(user.InjectOrgID(context.Background(), "1")) diff --git a/pkg/querier/tripperware/queryrange/results_cache.go b/pkg/querier/tripperware/queryrange/results_cache.go index 6378a82fbef..96f24516bee 100644 --- a/pkg/querier/tripperware/queryrange/results_cache.go +++ b/pkg/querier/tripperware/queryrange/results_cache.go @@ -293,11 +293,9 @@ func (s resultsCache) Do(ctx context.Context, r tripperware.Request) (tripperwar // shouldCacheResponse says whether the response should be cached or not. 
func (s resultsCache) shouldCacheResponse(ctx context.Context, req tripperware.Request, r tripperware.Response, maxCacheTime int64) bool { headerValues := getHeaderValuesWithName(r, cacheControlHeader) - for _, v := range headerValues { - if v == noStoreValue { - level.Debug(util_log.WithContext(ctx, s.logger)).Log("msg", fmt.Sprintf("%s header in response is equal to %s, not caching the response", cacheControlHeader, noStoreValue)) - return false - } + if slices.Contains(headerValues, noStoreValue) { + level.Debug(util_log.WithContext(ctx, s.logger)).Log("msg", fmt.Sprintf("%s header in response is equal to %s, not caching the response", cacheControlHeader, noStoreValue)) + return false } if !s.isAtModifierCachable(ctx, req, maxCacheTime) { diff --git a/pkg/querier/tripperware/queryrange/results_cache_test.go b/pkg/querier/tripperware/queryrange/results_cache_test.go index 8d4c32cfd25..05e968fb6ec 100644 --- a/pkg/querier/tripperware/queryrange/results_cache_test.go +++ b/pkg/querier/tripperware/queryrange/results_cache_test.go @@ -298,7 +298,6 @@ func TestStatsCacheQuerySamples(t *testing.T) { expectedResponse: mkAPIResponseWithStats(0, 100, 10, false, false), }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() cfg := ResultsCacheConfig{ @@ -990,7 +989,6 @@ func TestPartition(t *testing.T) { expectedScannedSamplesFromCachedResponse: getScannedSamples(100, 105, 10), }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() s := resultsCache{ @@ -1243,7 +1241,6 @@ func TestHandleHit(t *testing.T) { }, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() sut := resultsCache{ @@ -1373,7 +1370,6 @@ func TestResultsCacheMaxFreshness(t *testing.T) { expectedResponse: parsedResponse, }, } { - tc := tc t.Run(strconv.Itoa(i), func(t *testing.T) { t.Parallel() var cfg ResultsCacheConfig @@ -1477,7 +1473,6 @@ func TestSplitter_generateCacheKey(t *testing.T) { {"3d5h", &tripperware.PrometheusRequest{Start: toMs(77 * time.Hour), Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:3"}, } for _, tt := range tests { - tt := tt t.Run(fmt.Sprintf("%s - %s", tt.name, tt.interval), func(t *testing.T) { t.Parallel() ctx := user.InjectOrgID(context.Background(), "1") @@ -1526,7 +1521,6 @@ func TestResultsCacheShouldCacheFunc(t *testing.T) { } for _, tc := range testcases { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() calls := 0 diff --git a/pkg/querier/tripperware/queryrange/split_by_interval_test.go b/pkg/querier/tripperware/queryrange/split_by_interval_test.go index 31b0d82541d..0e122ef35fc 100644 --- a/pkg/querier/tripperware/queryrange/split_by_interval_test.go +++ b/pkg/querier/tripperware/queryrange/split_by_interval_test.go @@ -61,7 +61,6 @@ func TestNextIntervalBoundary(t *testing.T) { {toMs(day) + 15*seconds, 35 * seconds, 2*toMs(day) - 5*seconds, day}, {toMs(time.Hour) + 15*seconds, 35 * seconds, 2*toMs(time.Hour) - 15*seconds, time.Hour}, } { - tc := tc t.Run(strconv.Itoa(i), func(t *testing.T) { t.Parallel() require.Equal(t, tc.out, nextIntervalBoundary(tc.in, tc.step, tc.interval)) @@ -266,7 +265,6 @@ func TestSplitQuery(t *testing.T) { interval: 3 * time.Hour, }, } { - tc := tc t.Run(strconv.Itoa(i), func(t *testing.T) { t.Parallel() days, err := splitQuery(tc.input, tc.interval) @@ -321,7 +319,6 @@ func TestSplitByDay(t *testing.T) { intervalFn: dynamicIntervalFn(Config{SplitQueriesByInterval: day, DynamicQuerySplitsConfig: DynamicQuerySplitsConfig{MaxShardsPerQuery: 10}}, mockLimits{}, querysharding.NewQueryAnalyzer(), lookbackDelta), 
}, } { - tc := tc t.Run(strconv.Itoa(i), func(t *testing.T) { t.Parallel() var actualCount atomic.Int32 @@ -423,7 +420,6 @@ func Test_evaluateAtModifier(t *testing.T) { expectedErrorCode: http.StatusBadRequest, }, } { - tt := tt t.Run(tt.in, func(t *testing.T) { out, err := evaluateAtModifierFunction(tt.in, start, end) if tt.expectedErrorCode != 0 { diff --git a/pkg/querier/tripperware/queryrange/step_align_test.go b/pkg/querier/tripperware/queryrange/step_align_test.go index ac197b5b46b..5a6b69f8a8b 100644 --- a/pkg/querier/tripperware/queryrange/step_align_test.go +++ b/pkg/querier/tripperware/queryrange/step_align_test.go @@ -40,7 +40,6 @@ func TestStepAlign(t *testing.T) { }, }, } { - tc := tc t.Run(strconv.Itoa(i), func(t *testing.T) { t.Parallel() var result *tripperware.PrometheusRequest diff --git a/pkg/querier/tripperware/queryrange/test_utils.go b/pkg/querier/tripperware/queryrange/test_utils.go index a48ae956131..7d37139d045 100644 --- a/pkg/querier/tripperware/queryrange/test_utils.go +++ b/pkg/querier/tripperware/queryrange/test_utils.go @@ -18,7 +18,7 @@ func genLabels( l := labelSet[0] rest := genLabels(labelSet[1:], labelBuckets) - for i := 0; i < labelBuckets; i++ { + for i := range labelBuckets { x := labels.Label{ Name: l, Value: fmt.Sprintf("%d", i), diff --git a/pkg/querier/tripperware/queryrange/value_test.go b/pkg/querier/tripperware/queryrange/value_test.go index b31230b4ae5..38e30532223 100644 --- a/pkg/querier/tripperware/queryrange/value_test.go +++ b/pkg/querier/tripperware/queryrange/value_test.go @@ -146,7 +146,6 @@ func TestFromValue(t *testing.T) { } for i, c := range testExpr { - c := c t.Run(fmt.Sprintf("[%d]", i), func(t *testing.T) { t.Parallel() result, err := FromResult(c.input) diff --git a/pkg/querier/tripperware/shard_by.go b/pkg/querier/tripperware/shard_by.go index 5cd23459e14..9053e522e2b 100644 --- a/pkg/querier/tripperware/shard_by.go +++ b/pkg/querier/tripperware/shard_by.go @@ -92,7 +92,7 @@ func (s shardBy) Do(ctx context.Context, r Request) (Response, error) { func (s shardBy) shardQuery(l log.Logger, verticalShardSize int, r Request, analysis querysharding.QueryAnalysis) []Request { reqs := make([]Request, verticalShardSize) - for i := 0; i < verticalShardSize; i++ { + for i := range verticalShardSize { q, err := cquerysharding.InjectShardingInfo(r.GetQuery(), &storepb.ShardInfo{ TotalShards: int64(verticalShardSize), ShardIndex: int64(i), diff --git a/pkg/querier/tripperware/util.go b/pkg/querier/tripperware/util.go index c1e2144b969..90f2224c115 100644 --- a/pkg/querier/tripperware/util.go +++ b/pkg/querier/tripperware/util.go @@ -38,11 +38,8 @@ func DoRequests(ctx context.Context, downstream Handler, reqs []Request, limits }() respChan, errChan := make(chan RequestResponse), make(chan error) - parallelism := validation.SmallestPositiveIntPerTenant(tenantIDs, limits.MaxQueryParallelism) - if parallelism > len(reqs) { - parallelism = len(reqs) - } - for i := 0; i < parallelism; i++ { + parallelism := min(validation.SmallestPositiveIntPerTenant(tenantIDs, limits.MaxQueryParallelism), len(reqs)) + for range parallelism { go func() { for req := range intermediate { resp, err := downstream.Do(ctx, req) diff --git a/pkg/querier/worker/frontend_processor_test.go b/pkg/querier/worker/frontend_processor_test.go index 3b10cc29a6d..2881a10b0f9 100644 --- a/pkg/querier/worker/frontend_processor_test.go +++ b/pkg/querier/worker/frontend_processor_test.go @@ -34,7 +34,7 @@ func TestRecvFailDoesntCancelProcess(t *testing.T) { 
mgr.processQueriesOnSingleStream(ctx, cc, "test:12345") }() - test.Poll(t, time.Second, true, func() interface{} { + test.Poll(t, time.Second, true, func() any { return running.Load() }) @@ -44,7 +44,7 @@ func TestRecvFailDoesntCancelProcess(t *testing.T) { assert.Equal(t, true, running.Load()) cancel() - test.Poll(t, time.Second, false, func() interface{} { + test.Poll(t, time.Second, false, func() any { return running.Load() }) } @@ -61,18 +61,18 @@ func TestContextCancelStopsProcess(t *testing.T) { pm := newProcessorManager(ctx, &mockProcessor{}, cc, "test") pm.concurrency(1) - test.Poll(t, time.Second, 1, func() interface{} { + test.Poll(t, time.Second, 1, func() any { return int(pm.currentProcessors.Load()) }) cancel() - test.Poll(t, time.Second, 0, func() interface{} { + test.Poll(t, time.Second, 0, func() any { return int(pm.currentProcessors.Load()) }) pm.stop() - test.Poll(t, time.Second, 0, func() interface{} { + test.Poll(t, time.Second, 0, func() any { return int(pm.currentProcessors.Load()) }) } diff --git a/pkg/querier/worker/scheduler_processor_test.go b/pkg/querier/worker/scheduler_processor_test.go index c3d2534e441..ea718b0779a 100644 --- a/pkg/querier/worker/scheduler_processor_test.go +++ b/pkg/querier/worker/scheduler_processor_test.go @@ -92,12 +92,12 @@ func (m *mockQuerierLoopClient) Context() context.Context { return args.Get(0).(context.Context) } -func (m *mockQuerierLoopClient) SendMsg(msg interface{}) error { +func (m *mockQuerierLoopClient) SendMsg(msg any) error { args := m.Called(msg) return args.Error(0) } -func (m *mockQuerierLoopClient) RecvMsg(msg interface{}) error { +func (m *mockQuerierLoopClient) RecvMsg(msg any) error { args := m.Called(msg) return args.Error(0) } diff --git a/pkg/querier/worker/worker_test.go b/pkg/querier/worker/worker_test.go index 13a21949c5e..d2076883064 100644 --- a/pkg/querier/worker/worker_test.go +++ b/pkg/querier/worker/worker_test.go @@ -69,7 +69,6 @@ func TestResetConcurrency(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() cfg := Config{ @@ -88,13 +87,13 @@ func TestResetConcurrency(t *testing.T) { w.AddressAdded(fmt.Sprintf("127.0.0.1:%d", i)) } - test.Poll(t, 250*time.Millisecond, tt.expectedConcurrency, func() interface{} { + test.Poll(t, 250*time.Millisecond, tt.expectedConcurrency, func() any { return getConcurrentProcessors(w) }) // now we remove an address and ensure we still have the expected concurrency w.AddressRemoved(fmt.Sprintf("127.0.0.1:%d", rand.Intn(tt.numTargets))) - test.Poll(t, 250*time.Millisecond, tt.expectedConcurrencyAfterTargetRemoval, func() interface{} { + test.Poll(t, 250*time.Millisecond, tt.expectedConcurrencyAfterTargetRemoval, func() any { return getConcurrentProcessors(w) }) diff --git a/pkg/querysharding/util.go b/pkg/querysharding/util.go index 20d56a53d5c..05a8552cc32 100644 --- a/pkg/querysharding/util.go +++ b/pkg/querysharding/util.go @@ -18,7 +18,7 @@ const ( ) var ( - Buffers = sync.Pool{New: func() interface{} { + Buffers = sync.Pool{New: func() any { b := make([]byte, 0, 100) return &b }} diff --git a/pkg/ring/basic_lifecycler.go b/pkg/ring/basic_lifecycler.go index 70491b1a1b2..fb751e4fa1d 100644 --- a/pkg/ring/basic_lifecycler.go +++ b/pkg/ring/basic_lifecycler.go @@ -271,7 +271,7 @@ heartbeatLoop: func (l *BasicLifecycler) registerInstance(ctx context.Context) error { var instanceDesc InstanceDesc - err := l.store.CAS(ctx, l.ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := 
l.store.CAS(ctx, l.ringKey, func(in any) (out any, retry bool, err error) { ringDesc := GetOrCreateRingDesc(in) var exists bool @@ -392,7 +392,7 @@ func (l *BasicLifecycler) verifyTokens(ctx context.Context) bool { func (l *BasicLifecycler) unregisterInstance(ctx context.Context) error { level.Info(l.logger).Log("msg", "unregistering instance from ring", "ring", l.ringName) - err := l.store.CAS(ctx, l.ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := l.store.CAS(ctx, l.ringKey, func(in any) (out any, retry bool, err error) { if in == nil { return nil, false, fmt.Errorf("found empty ring when trying to unregister") } @@ -418,7 +418,7 @@ func (l *BasicLifecycler) unregisterInstance(ctx context.Context) error { func (l *BasicLifecycler) updateInstance(ctx context.Context, update func(*Desc, *InstanceDesc) bool) error { var instanceDesc InstanceDesc - err := l.store.CAS(ctx, l.ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := l.store.CAS(ctx, l.ringKey, func(in any) (out any, retry bool, err error) { ringDesc := GetOrCreateRingDesc(in) var ok bool diff --git a/pkg/ring/basic_lifecycler_delegates_test.go b/pkg/ring/basic_lifecycler_delegates_test.go index 1a81233ac96..cb7e4672f46 100644 --- a/pkg/ring/basic_lifecycler_delegates_test.go +++ b/pkg/ring/basic_lifecycler_delegates_test.go @@ -172,7 +172,7 @@ func TestTokensPersistencyDelegate_ShouldHandleTheCaseTheInstanceIsAlreadyInTheR defer services.StopAndAwaitTerminated(ctx, lifecycler) //nolint:errcheck // Add the instance to the ring. - require.NoError(t, store.CAS(ctx, testRingKey, func(in interface{}) (out interface{}, retry bool, err error) { + require.NoError(t, store.CAS(ctx, testRingKey, func(in any) (out any, retry bool, err error) { ringDesc := NewDesc() ringDesc.AddIngester(cfg.ID, cfg.Addr, cfg.Zone, testData.initialTokens, testData.initialState, registeredAt) return ringDesc, true, nil @@ -278,7 +278,7 @@ func TestAutoForgetDelegate(t *testing.T) { require.NoError(t, err) // Setup the initial state of the ring. - require.NoError(t, store.CAS(ctx, testRingKey, func(in interface{}) (out interface{}, retry bool, err error) { + require.NoError(t, store.CAS(ctx, testRingKey, func(in any) (out any, retry bool, err error) { ringDesc := NewDesc() testData.setup(ringDesc) return ringDesc, true, nil @@ -289,7 +289,7 @@ func TestAutoForgetDelegate(t *testing.T) { defer services.StopAndAwaitTerminated(ctx, lifecycler) //nolint:errcheck // Wait until an heartbeat has been sent. - test.Poll(t, time.Second, true, func() interface{} { + test.Poll(t, time.Second, true, func() any { return testutil.ToFloat64(lifecycler.metrics.heartbeats) > 0 }) diff --git a/pkg/ring/basic_lifecycler_test.go b/pkg/ring/basic_lifecycler_test.go index b21c3cd4fdb..6e9d704f715 100644 --- a/pkg/ring/basic_lifecycler_test.go +++ b/pkg/ring/basic_lifecycler_test.go @@ -89,7 +89,7 @@ func TestBasicLifecycler_RegisterOnStart(t *testing.T) { // Add an initial instance to the ring. 
if testData.initialInstanceDesc != nil { - require.NoError(t, store.CAS(ctx, testRingKey, func(in interface{}) (out interface{}, retry bool, err error) { + require.NoError(t, store.CAS(ctx, testRingKey, func(in any) (out any, retry bool, err error) { desc := testData.initialInstanceDesc ringDesc := GetOrCreateRingDesc(in) @@ -244,7 +244,7 @@ func TestBasicLifecycler_HeartbeatWhileRunning(t *testing.T) { desc, _ := getInstanceFromStore(t, store, testInstanceID) initialTimestamp := desc.GetTimestamp() - test.Poll(t, time.Second*5, true, func() interface{} { + test.Poll(t, time.Second*5, true, func() any { desc, _ := getInstanceFromStore(t, store, testInstanceID) currTimestamp := desc.GetTimestamp() @@ -269,7 +269,7 @@ func TestBasicLifecycler_HeartbeatWhileStopping(t *testing.T) { // Since the hearbeat timestamp is in seconds we would have to wait 1s before we can assert // on it being changed, regardless the heartbeat period. To speed up this test, we're going // to reset the timestamp to 0 and then assert it has been updated. - require.NoError(t, store.CAS(ctx, testRingKey, func(in interface{}) (out interface{}, retry bool, err error) { + require.NoError(t, store.CAS(ctx, testRingKey, func(in any) (out any, retry bool, err error) { ringDesc := GetOrCreateRingDesc(in) instanceDesc := ringDesc.Ingesters[testInstanceID] instanceDesc.Timestamp = 0 @@ -278,7 +278,7 @@ func TestBasicLifecycler_HeartbeatWhileStopping(t *testing.T) { })) // Wait until the timestamp has been updated. - test.Poll(t, time.Second, true, func() interface{} { + test.Poll(t, time.Second, true, func() any { desc, _ := getInstanceFromStore(t, store, testInstanceID) currTimestamp := desc.GetTimestamp() @@ -313,11 +313,11 @@ func TestBasicLifecycler_HeartbeatAfterBackendRest(t *testing.T) { // Now we delete it from the ring to simulate a ring storage reset and we expect the next heartbeat // will restore it. - require.NoError(t, store.CAS(ctx, testRingKey, func(in interface{}) (out interface{}, retry bool, err error) { + require.NoError(t, store.CAS(ctx, testRingKey, func(in any) (out any, retry bool, err error) { return NewDesc(), true, nil })) - test.Poll(t, time.Second, true, func() interface{} { + test.Poll(t, time.Second, true, func() any { desc, ok := getInstanceFromStore(t, store, testInstanceID) return ok && desc.GetTimestamp() > 0 && @@ -371,7 +371,7 @@ func TestBasicLifecycler_TokensObservePeriod(t *testing.T) { // While the lifecycler is starting we poll the ring. As soon as the instance // is registered, we remove some tokens to simulate how gossip memberlist // reconciliation works in case of clashing tokens. - test.Poll(t, time.Second, true, func() interface{} { + test.Poll(t, time.Second, true, func() any { // Ensure the instance has been registered in the ring. desc, ok := getInstanceFromStore(t, store, testInstanceID) if !ok { @@ -379,7 +379,7 @@ func TestBasicLifecycler_TokensObservePeriod(t *testing.T) { } // Remove some tokens. - return store.CAS(ctx, testRingKey, func(in interface{}) (out interface{}, retry bool, err error) { + return store.CAS(ctx, testRingKey, func(in any) (out any, retry bool, err error) { ringDesc := GetOrCreateRingDesc(in) ringDesc.AddIngester(testInstanceID, desc.Addr, desc.Zone, Tokens{4, 5}, desc.State, time.Now()) return ringDesc, true, nil @@ -413,7 +413,7 @@ func TestBasicLifecycler_updateInstance_ShouldAddInstanceToTheRingIfDoesNotExist expectedRegisteredAt := lifecycler.GetRegisteredAt() // Now we delete it from the ring to simulate a ring storage reset. 
- require.NoError(t, store.CAS(ctx, testRingKey, func(in interface{}) (out interface{}, retry bool, err error) { + require.NoError(t, store.CAS(ctx, testRingKey, func(in any) (out any, retry bool, err error) { return NewDesc(), true, nil })) diff --git a/pkg/ring/bench/ring_memberlist_test.go b/pkg/ring/bench/ring_memberlist_test.go index 1366c47aa46..51fe51c196a 100644 --- a/pkg/ring/bench/ring_memberlist_test.go +++ b/pkg/ring/bench/ring_memberlist_test.go @@ -81,7 +81,7 @@ func BenchmarkMemberlistReceiveWithRingDesc(b *testing.B) { const numTokens = 128 initialDesc := ring.NewDesc() { - for i := 0; i < numInstances; i++ { + for i := range numInstances { tokens := generateUniqueTokens(i, numTokens) initialDesc.AddIngester(fmt.Sprintf("instance-%d", i), "127.0.0.1", "zone", tokens, ring.ACTIVE, time.Now()) } @@ -101,9 +101,7 @@ func BenchmarkMemberlistReceiveWithRingDesc(b *testing.B) { testMsgs[i] = encodeMessage(b, "ring", testDesc) } - b.ResetTimer() - - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { mkv.NotifyMsg(testMsgs[i]) } } diff --git a/pkg/ring/client/pool.go b/pkg/ring/client/pool.go index 981a7399dda..e7b822592e5 100644 --- a/pkg/ring/client/pool.go +++ b/pkg/ring/client/pool.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "slices" "sync" "time" @@ -13,7 +14,6 @@ import ( "github.com/weaveworks/common/user" "google.golang.org/grpc/health/grpc_health_v1" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -165,7 +165,7 @@ func (p *Pool) removeStaleClients() { } for _, addr := range p.RegisteredAddresses() { - if util.StringsContain(serviceAddrs, addr) { + if slices.Contains(serviceAddrs, addr) { continue } level.Info(p.logger).Log("msg", "removing stale client", "addr", addr) diff --git a/pkg/ring/http.go b/pkg/ring/http.go index cbef6f3ce65..023b716fefe 100644 --- a/pkg/ring/http.go +++ b/pkg/ring/http.go @@ -94,7 +94,7 @@ func init() { } func (r *Ring) forget(ctx context.Context, id string) error { - unregister := func(in interface{}) (out interface{}, retry bool, err error) { + unregister := func(in any) (out any, retry bool, err error) { if in == nil { return nil, false, fmt.Errorf("found empty ring when trying to unregister") } diff --git a/pkg/ring/kv/client.go b/pkg/ring/kv/client.go index eae1ee25189..a2e55689daa 100644 --- a/pkg/ring/kv/client.go +++ b/pkg/ring/kv/client.go @@ -95,7 +95,7 @@ type Client interface { // Get a specific key. Will use a codec to deserialise key to appropriate type. // If the key does not exist, Get will return nil and no error. - Get(ctx context.Context, key string) (interface{}, error) + Get(ctx context.Context, key string) (any, error) // Delete a specific key. Deletions are best-effort and no error will // be returned if the key does not exist. @@ -108,13 +108,13 @@ type Client interface { // with new value etc. Guarantees that only a single concurrent CAS // succeeds. Callback can return nil to indicate it is happy with existing // value. - CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error + CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error // WatchKey calls f whenever the value stored under key changes. - WatchKey(ctx context.Context, key string, f func(interface{}) bool) + WatchKey(ctx context.Context, key string, f func(any) bool) // WatchPrefix calls f whenever any value stored under prefix changes. 
- WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) + WatchPrefix(ctx context.Context, prefix string, f func(string, any) bool) // LastUpdateTime returns the time a key was last sync by the kv store LastUpdateTime(key string) time.Time diff --git a/pkg/ring/kv/client_test.go b/pkg/ring/kv/client_test.go index b31f904d08f..26a0c20f6c2 100644 --- a/pkg/ring/kv/client_test.go +++ b/pkg/ring/kv/client_test.go @@ -64,7 +64,7 @@ func Test_createClient_singleBackend_mustContainRoleAndTypeLabels(t *testing.T) reg := prometheus.NewPedanticRegistry() client, err := createClient("mock", "/test1", storeCfg, testCodec, Primary, reg, testLogger{}) require.NoError(t, err) - require.NoError(t, client.CAS(context.Background(), "/test", func(_ interface{}) (out interface{}, retry bool, err error) { + require.NoError(t, client.CAS(context.Background(), "/test", func(_ any) (out any, retry bool, err error) { out = &mockMessage{id: "inCAS"} retry = false return @@ -82,7 +82,7 @@ func Test_createClient_multiBackend_mustContainRoleAndTypeLabels(t *testing.T) { reg := prometheus.NewPedanticRegistry() client, err := createClient("multi", "/test1", storeCfg, testCodec, Primary, reg, testLogger{}) require.NoError(t, err) - require.NoError(t, client.CAS(context.Background(), "/test", func(_ interface{}) (out interface{}, retry bool, err error) { + require.NoError(t, client.CAS(context.Background(), "/test", func(_ any) (out any, retry bool, err error) { out = &mockMessage{id: "inCAS"} retry = false return @@ -154,6 +154,6 @@ func (m *mockMessage) ProtoMessage() { type testLogger struct { } -func (l testLogger) Log(keyvals ...interface{}) error { +func (l testLogger) Log(keyvals ...any) error { return nil } diff --git a/pkg/ring/kv/codec/clonable.go b/pkg/ring/kv/codec/clonable.go index c3df74c6219..5b0eb38c84d 100644 --- a/pkg/ring/kv/codec/clonable.go +++ b/pkg/ring/kv/codec/clonable.go @@ -2,5 +2,5 @@ package codec type Clonable interface { // Clone should return a deep copy of the state. - Clone() interface{} + Clone() any } diff --git a/pkg/ring/kv/codec/codec.go b/pkg/ring/kv/codec/codec.go index d701bbe2082..9c88473e501 100644 --- a/pkg/ring/kv/codec/codec.go +++ b/pkg/ring/kv/codec/codec.go @@ -10,11 +10,11 @@ import ( // Codec allows KV clients to serialise and deserialise values. type Codec interface { - Decode([]byte) (interface{}, error) - Encode(interface{}) ([]byte, error) + Decode([]byte) (any, error) + Encode(any) ([]byte, error) - DecodeMultiKey(map[string][]byte) (interface{}, error) - EncodeMultiKey(interface{}) (map[string][]byte, error) + DecodeMultiKey(map[string][]byte) (any, error) + EncodeMultiKey(any) (map[string][]byte, error) // CodecID is a short identifier to communicate what codec should be used to decode the value. // Once in use, this should be stable to avoid confusing other clients. 
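Illustrative aside (not part of the change set): several hunks above replace hand-rolled scans and comparison sorts with the Go 1.21 slices package: slices.Contains in results_cache.go and ring/client/pool.go, slices.ContainsFunc in query_attribute_matcher.go, and slices.Sort in tripperware/merge.go. A small sketch of the equivalences, using hypothetical sample data:

package main

import (
	"fmt"
	"slices"
	"strings"
)

func main() {
	// Hypothetical header values; the diff applies the same helper to
	// Cache-Control values, match[] matchers, and stats keys.
	headers := []string{"no-cache", "no-store"}

	// Replaces: for _, v := range headers { if v == "no-store" { ... } }
	fmt.Println(slices.Contains(headers, "no-store")) // true

	// Replaces an "at least one element matches" loop with a predicate.
	fmt.Println(slices.ContainsFunc(headers, func(v string) bool {
		return strings.HasPrefix(v, "no-")
	})) // true

	// Replaces: sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
	keys := []string{"b", "a", "c"}
	slices.Sort(keys)
	fmt.Println(keys) // [a b c]
}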
@@ -36,12 +36,12 @@ func (p Proto) CodecID() string { } // Decode implements Codec -func (p Proto) Decode(bytes []byte) (interface{}, error) { +func (p Proto) Decode(bytes []byte) (any, error) { return p.decode(bytes, p.factory()) } // DecodeMultiKey implements Codec -func (p Proto) DecodeMultiKey(data map[string][]byte) (interface{}, error) { +func (p Proto) DecodeMultiKey(data map[string][]byte) (any, error) { msg := p.factory() // Don't even try out, ok := msg.(MultiKey) @@ -50,7 +50,7 @@ func (p Proto) DecodeMultiKey(data map[string][]byte) (interface{}, error) { } if len(data) > 0 { - res := make(map[string]interface{}, len(data)) + res := make(map[string]any, len(data)) for key, bytes := range data { decoded, err := p.decode(bytes, out.GetItemFactory()) if err != nil { @@ -64,7 +64,7 @@ func (p Proto) DecodeMultiKey(data map[string][]byte) (interface{}, error) { return out, nil } -func (p Proto) decode(bytes []byte, out proto.Message) (interface{}, error) { +func (p Proto) decode(bytes []byte, out proto.Message) (any, error) { bytes, err := snappy.Decode(nil, bytes) if err != nil { return nil, err @@ -76,7 +76,7 @@ func (p Proto) decode(bytes []byte, out proto.Message) (interface{}, error) { } // Encode implements Codec -func (p Proto) Encode(msg interface{}) ([]byte, error) { +func (p Proto) Encode(msg any) ([]byte, error) { bytes, err := proto.Marshal(msg.(proto.Message)) if err != nil { return nil, err @@ -85,7 +85,7 @@ func (p Proto) Encode(msg interface{}) ([]byte, error) { } // EncodeMultiKey implements Codec -func (p Proto) EncodeMultiKey(msg interface{}) (map[string][]byte, error) { +func (p Proto) EncodeMultiKey(msg any) (map[string][]byte, error) { // Don't even try r, ok := msg.(MultiKey) if !ok || r == nil { @@ -112,19 +112,19 @@ func (String) CodecID() string { } // Decode implements Codec. -func (String) Decode(bytes []byte) (interface{}, error) { +func (String) Decode(bytes []byte) (any, error) { return string(bytes), nil } // Encode implements Codec. 
-func (String) Encode(msg interface{}) ([]byte, error) { +func (String) Encode(msg any) ([]byte, error) { return []byte(msg.(string)), nil } -func (String) EncodeMultiKey(msg interface{}) (map[string][]byte, error) { +func (String) EncodeMultiKey(msg any) (map[string][]byte, error) { return nil, errors.New("String codec does not support EncodeMultiKey") } -func (String) DecodeMultiKey(map[string][]byte) (interface{}, error) { +func (String) DecodeMultiKey(map[string][]byte) (any, error) { return nil, errors.New("String codec does not support DecodeMultiKey") } diff --git a/pkg/ring/kv/codec/codec_test.go b/pkg/ring/kv/codec/codec_test.go index ff729626e7b..99d1961cdd0 100644 --- a/pkg/ring/kv/codec/codec_test.go +++ b/pkg/ring/kv/codec/codec_test.go @@ -28,7 +28,7 @@ func Test_EncodeMultikey(t *testing.T) { codec := NewProtoCodec("test", newProtoDescMock) descMock := &DescMock{} expectedSplitKeys := []string{"t1", "t2"} - expectedSplit := map[string]interface{}{ + expectedSplit := map[string]any{ expectedSplitKeys[0]: descMock, expectedSplitKeys[1]: descMock, } @@ -94,17 +94,17 @@ func newProtoDescMock() proto.Message { return &DescMock{} } -func (m *DescMock) Clone() interface{} { +func (m *DescMock) Clone() any { args := m.Called() return args.Get(0) } -func (m *DescMock) SplitByID() map[string]interface{} { +func (m *DescMock) SplitByID() map[string]any { args := m.Called() - return args.Get(0).(map[string]interface{}) + return args.Get(0).(map[string]any) } -func (m *DescMock) JoinIds(map[string]interface{}) { +func (m *DescMock) JoinIds(map[string]any) { m.Called() } @@ -113,7 +113,7 @@ func (m *DescMock) GetItemFactory() proto.Message { return args.Get(0).(proto.Message) } -func (m *DescMock) FindDifference(that MultiKey) (interface{}, []string, error) { +func (m *DescMock) FindDifference(that MultiKey) (any, []string, error) { args := m.Called(that) var err error if args.Get(2) != nil { diff --git a/pkg/ring/kv/codec/multikey.go b/pkg/ring/kv/codec/multikey.go index bd8802c4adc..b2e9f12abc6 100644 --- a/pkg/ring/kv/codec/multikey.go +++ b/pkg/ring/kv/codec/multikey.go @@ -9,11 +9,11 @@ type MultiKey interface { // SplitByID Split interface in array of key and value. THe key is a unique identifier of an instance in the ring. The value is // interface with its data. The interface resultant need to be a proto.Message - SplitByID() map[string]interface{} + SplitByID() map[string]any // JoinIds update the current interface to add receiving key value information. The key is an unique identifier for an instance. // The value is the information for that instance. - JoinIds(in map[string]interface{}) + JoinIds(in map[string]any) // GetItemFactory method to be used for deserilaize the value information from an instance GetItemFactory() proto.Message @@ -21,5 +21,5 @@ type MultiKey interface { // FindDifference returns the difference between two Multikeys. The returns are an interface which also implements Multikey // with an array of keys which were changed, and an array of strings which are unique identifiers deleted. 
An error is // returned when that does not implement the correct codec - FindDifference(that MultiKey) (interface{}, []string, error) + FindDifference(that MultiKey) (any, []string, error) } diff --git a/pkg/ring/kv/consul/client.go b/pkg/ring/kv/consul/client.go index a9ecdc279e9..7e86bd8aef2 100644 --- a/pkg/ring/kv/consul/client.go +++ b/pkg/ring/kv/consul/client.go @@ -146,7 +146,7 @@ func NewClient(cfg Config, codec codec.Codec, logger log.Logger, registerer prom } // Put is mostly here for testing. -func (c *Client) Put(ctx context.Context, key string, value interface{}) error { +func (c *Client) Put(ctx context.Context, key string, value any) error { bytes, err := c.codec.Encode(value) if err != nil { return err @@ -163,13 +163,13 @@ func (c *Client) Put(ctx context.Context, key string, value interface{}) error { // CAS atomically modifies a value in a callback. // If value doesn't exist you'll get nil as an argument to your callback. -func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error { +func (c *Client) CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error { return instrument.CollectedRequest(ctx, "CAS loop", c.consulMetrics.consulRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { return c.cas(ctx, key, f) }) } -func (c *Client) cas(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error { +func (c *Client) cas(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error { retries := c.cfg.MaxCasRetries if retries == 0 { retries = 10 @@ -193,7 +193,7 @@ func (c *Client) cas(ctx context.Context, key string, f func(in interface{}) (ou level.Error(c.logger).Log("msg", "error getting key", "key", key, "err", err) continue } - var intermediate interface{} + var intermediate any if kvp != nil { out, err := c.codec.Decode(kvp.Value) if err != nil { @@ -247,7 +247,7 @@ func (c *Client) cas(ctx context.Context, key string, f func(in interface{}) (ou // value. To construct the deserialised value, a factory function should be // supplied which generates an empty struct for WatchKey to deserialise // into. This function blocks until the context is cancelled or f returns false. -func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) bool) { +func (c *Client) WatchKey(ctx context.Context, key string, f func(any) bool) { var ( backoff = backoff.New(ctx, backoffConfig) index = uint64(0) @@ -308,7 +308,7 @@ func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) b // WatchPrefix will watch a given prefix in Consul for new keys and changes to existing keys under that prefix. // When the value under said key changes, the f callback is called with the deserialised value. // Values in Consul are assumed to be JSON. This function blocks until the context is cancelled. -func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) { +func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, any) bool) { var ( backoff = backoff.New(ctx, backoffConfig) index = uint64(0) @@ -387,7 +387,7 @@ func (c *Client) List(ctx context.Context, prefix string) ([]string, error) { } // Get implements kv.Get. 
-func (c *Client) Get(ctx context.Context, key string) (interface{}, error) { +func (c *Client) Get(ctx context.Context, key string) (any, error) { options := &consul.QueryOptions{ AllowStale: !c.cfg.ConsistentReads, RequireConsistent: c.cfg.ConsistentReads, @@ -434,9 +434,6 @@ func (c *Client) createRateLimiter() *rate.Limiter { // burst is ignored when limit = rate.Inf return rate.NewLimiter(rate.Inf, 0) } - burst := c.cfg.WatchKeyBurstSize - if burst < 1 { - burst = 1 - } + burst := max(c.cfg.WatchKeyBurstSize, 1) return rate.NewLimiter(rate.Limit(c.cfg.WatchKeyRateLimit), burst) } diff --git a/pkg/ring/kv/consul/client_test.go b/pkg/ring/kv/consul/client_test.go index e3ab734305b..35c75b58ed8 100644 --- a/pkg/ring/kv/consul/client_test.go +++ b/pkg/ring/kv/consul/client_test.go @@ -28,7 +28,7 @@ func writeValuesToKV(t *testing.T, client *Client, key string, start, end int, s defer close(ch) for i := start; i <= end; i++ { t.Log("ts", time.Now(), "msg", "writing value", "val", i) - _, _ = client.kv.Put(&consul.KVPair{Key: key, Value: []byte(fmt.Sprintf("%d", i))}, nil) + _, _ = client.kv.Put(&consul.KVPair{Key: key, Value: fmt.Appendf(nil, "%d", i)}, nil) time.Sleep(sleep) } }() @@ -181,7 +181,7 @@ func TestReset(t *testing.T) { defer close(ch) for i := 0; i <= max; i++ { t.Log("ts", time.Now(), "msg", "writing value", "val", i) - _, _ = c.kv.Put(&consul.KVPair{Key: key, Value: []byte(fmt.Sprintf("%d", i))}, nil) + _, _ = c.kv.Put(&consul.KVPair{Key: key, Value: fmt.Appendf(nil, "%d", i)}, nil) if i == 1 { c.kv.(*mockKV).ResetIndex() } @@ -214,7 +214,7 @@ func observeValueForSomeTime(t *testing.T, client *Client, key string, timeout t observed := []string(nil) ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() - client.WatchKey(ctx, key, func(i interface{}) bool { + client.WatchKey(ctx, key, func(i any) bool { s, ok := i.(string) if !ok { return false @@ -248,7 +248,7 @@ func TestWatchKeyWithNoStartValue(t *testing.T) { defer fn() reported := 0 - c.WatchKey(ctx, key, func(i interface{}) bool { + c.WatchKey(ctx, key, func(i any) bool { reported++ return reported != 2 }) @@ -260,6 +260,6 @@ func TestWatchKeyWithNoStartValue(t *testing.T) { type testLogger struct { } -func (l testLogger) Log(keyvals ...interface{}) error { +func (l testLogger) Log(keyvals ...any) error { return nil } diff --git a/pkg/ring/kv/dynamodb/client.go b/pkg/ring/kv/dynamodb/client.go index ba0d0387693..9c3e45b65b7 100644 --- a/pkg/ring/kv/dynamodb/client.go +++ b/pkg/ring/kv/dynamodb/client.go @@ -98,7 +98,7 @@ func (c *Client) List(ctx context.Context, key string) ([]string, error) { return resp, err } -func (c *Client) Get(ctx context.Context, key string) (interface{}, error) { +func (c *Client) Get(ctx context.Context, key string) (any, error) { resp, _, err := c.kv.Query(ctx, dynamodbKey{primaryKey: key}, false) if err != nil { level.Warn(c.logger).Log("msg", "error Get", "key", key, "err", err) @@ -135,7 +135,7 @@ func (c *Client) Delete(ctx context.Context, key string) error { return err } -func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error { +func (c *Client) CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error { bo := backoff.New(ctx, c.backoffConfig) for bo.Ongoing() { c.ddbMetrics.dynamodbCasAttempts.Inc() @@ -229,7 +229,7 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou return err } -func (c *Client) WatchKey(ctx 
context.Context, key string, f func(interface{}) bool) { +func (c *Client) WatchKey(ctx context.Context, key string, f func(any) bool) { bo := backoff.New(ctx, c.backoffConfig) for bo.Ongoing() { @@ -271,7 +271,7 @@ func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) b } } -func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) { +func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, any) bool) { bo := backoff.New(ctx, c.backoffConfig) for bo.Ongoing() { diff --git a/pkg/ring/kv/dynamodb/client_test.go b/pkg/ring/kv/dynamodb/client_test.go index f82a72de439..6885998d695 100644 --- a/pkg/ring/kv/dynamodb/client_test.go +++ b/pkg/ring/kv/dynamodb/client_test.go @@ -38,7 +38,7 @@ func Test_CAS_ErrorNoRetry(t *testing.T) { codecMock.On("DecodeMultiKey").Return(descMock, nil).Twice() descMock.On("Clone").Return(descMock).Once() - err := c.CAS(context.TODO(), key, func(in interface{}) (out interface{}, retry bool, err error) { + err := c.CAS(context.TODO(), key, func(in any) (out any, retry bool, err error) { return nil, false, expectedErr }) @@ -91,7 +91,7 @@ func Test_CAS_Backoff(t *testing.T) { descMock.On("FindDifference", descMock).Return(descMock, []string{"childkey"}, nil).Times(tc.expectedBatchCalls) codecMock.On("EncodeMultiKey").Return(map[string][]byte{}, nil).Times(tc.expectedBatchCalls) - err := c.CAS(context.TODO(), key, func(in interface{}) (out interface{}, retry bool, err error) { + err := c.CAS(context.TODO(), key, func(in any) (out any, retry bool, err error) { return descMock, true, nil }) @@ -115,7 +115,7 @@ func Test_CAS_Failed(t *testing.T) { ddbMock.On("Query").Return(map[string]dynamodbItem{}, errors.Errorf("test")) - err := c.CAS(context.TODO(), key, func(in interface{}) (out interface{}, retry bool, err error) { + err := c.CAS(context.TODO(), key, func(in any) (out any, retry bool, err error) { return descMock, true, nil }) @@ -145,7 +145,7 @@ func Test_CAS_Update(t *testing.T) { codecMock.On("EncodeMultiKey").Return(expectedUpdated, nil).Once() ddbMock.On("Batch", context.TODO(), expectedBatch, []dynamodbKey{}).Return(false, nil).Once() - err := c.CAS(context.TODO(), key, func(in interface{}) (out interface{}, retry bool, err error) { + err := c.CAS(context.TODO(), key, func(in any) (out any, retry bool, err error) { return descMock, true, nil }) @@ -172,7 +172,7 @@ func Test_CAS_Delete(t *testing.T) { codecMock.On("EncodeMultiKey").Return(map[string][]byte{}, nil).Once() ddbMock.On("Batch", context.TODO(), map[dynamodbKey]dynamodbItem{}, expectedBatch).Return(false, nil).Once() - err := c.CAS(context.TODO(), key, func(in interface{}) (out interface{}, retry bool, err error) { + err := c.CAS(context.TODO(), key, func(in any) (out any, retry bool, err error) { return descMock, true, nil }) @@ -208,7 +208,7 @@ func Test_CAS_Update_Delete(t *testing.T) { codecMock.On("EncodeMultiKey").Return(expectedUpdated, nil).Once() ddbMock.On("Batch", context.TODO(), expectedUpdateBatch, expectedDeleteBatch).Return(false, nil).Once() - err := c.CAS(context.TODO(), key, func(in interface{}) (out interface{}, retry bool, err error) { + err := c.CAS(context.TODO(), key, func(in any) (out any, retry bool, err error) { return descMock, true, nil }) @@ -227,7 +227,7 @@ func Test_WatchKey(t *testing.T) { ddbMock.On("Query").Return(map[string]dynamodbItem{}, nil) codecMock.On("DecodeMultiKey").Return(descMock, nil) - c.WatchKey(context.TODO(), key, func(i interface{}) bool { + 
c.WatchKey(context.TODO(), key, func(i any) bool { timesCalled++ ddbMock.AssertNumberOfCalls(t, "Query", timesCalled) codecMock.AssertNumberOfCalls(t, "DecodeMultiKey", timesCalled) @@ -245,7 +245,7 @@ func Test_WatchKey_UpdateStale(t *testing.T) { ddbMock.On("Query").Return(map[string]dynamodbItem{}, nil).Once() codecMock.On("DecodeMultiKey").Return(staleData, nil) - c.WatchKey(context.TODO(), key, func(i interface{}) bool { + c.WatchKey(context.TODO(), key, func(i any) bool { ddbMock.AssertNumberOfCalls(t, "Query", 1) codecMock.AssertNumberOfCalls(t, "DecodeMultiKey", 1) require.EqualValues(t, staleData, i) @@ -255,7 +255,7 @@ func Test_WatchKey_UpdateStale(t *testing.T) { ddbMock.On("Query").Return(map[string]dynamodbItem{}, errors.Errorf("failed")) staleData.On("Clone").Return(staleData).Once() - c.WatchKey(context.TODO(), key, func(i interface{}) bool { + c.WatchKey(context.TODO(), key, func(i any) bool { ddbMock.AssertNumberOfCalls(t, "Query", 12) codecMock.AssertNumberOfCalls(t, "DecodeMultiKey", 1) require.EqualValues(t, staleData, i) @@ -288,7 +288,7 @@ func Test_CAS_UpdateStale(t *testing.T) { codecMock.On("EncodeMultiKey").Return(expectedUpdated, nil).Once() ddbMock.On("Batch", context.TODO(), expectedBatch, []dynamodbKey{}).Return(false, nil).Once() - err := c.CAS(context.TODO(), key, func(in interface{}) (out interface{}, retry bool, err error) { + err := c.CAS(context.TODO(), key, func(in any) (out any, retry bool, err error) { return descMockResult, true, nil }) @@ -310,7 +310,7 @@ func Test_WatchPrefix(t *testing.T) { ddbMock.On("Query").Return(data, nil) codecMock.On("Decode").Twice() - c.WatchPrefix(context.TODO(), key, func(key string, i interface{}) bool { + c.WatchPrefix(context.TODO(), key, func(key string, i any) bool { require.EqualValues(t, string(data[key].data), i) delete(data, key) calls++ @@ -421,7 +421,7 @@ func (m *MockDynamodbClient) Batch(ctx context.Context, put map[dynamodbKey]dyna type TestLogger struct { } -func (l TestLogger) Log(...interface{}) error { +func (l TestLogger) Log(...any) error { return nil } @@ -435,23 +435,23 @@ func (*CodecMock) CodecID() string { } // Decode implements Codec. -func (m *CodecMock) Decode(bytes []byte) (interface{}, error) { +func (m *CodecMock) Decode(bytes []byte) (any, error) { m.Called() return string(bytes), nil } // Encode implements Codec. 
-func (m *CodecMock) Encode(i interface{}) ([]byte, error) { +func (m *CodecMock) Encode(i any) ([]byte, error) { m.Called() return []byte(i.(string)), nil } -func (m *CodecMock) EncodeMultiKey(interface{}) (map[string][]byte, error) { +func (m *CodecMock) EncodeMultiKey(any) (map[string][]byte, error) { args := m.Called() return args.Get(0).(map[string][]byte), nil } -func (m *CodecMock) DecodeMultiKey(map[string][]byte) (interface{}, error) { +func (m *CodecMock) DecodeMultiKey(map[string][]byte) (any, error) { args := m.Called() var err error if args.Get(1) != nil { @@ -464,17 +464,17 @@ type DescMock struct { mock.Mock } -func (m *DescMock) Clone() interface{} { +func (m *DescMock) Clone() any { args := m.Called() return args.Get(0) } -func (m *DescMock) SplitByID() map[string]interface{} { +func (m *DescMock) SplitByID() map[string]any { args := m.Called() - return args.Get(0).(map[string]interface{}) + return args.Get(0).(map[string]any) } -func (m *DescMock) JoinIds(map[string]interface{}) { +func (m *DescMock) JoinIds(map[string]any) { m.Called() } @@ -483,7 +483,7 @@ func (m *DescMock) GetItemFactory() proto.Message { return args.Get(0).(proto.Message) } -func (m *DescMock) FindDifference(that codec.MultiKey) (interface{}, []string, error) { +func (m *DescMock) FindDifference(that codec.MultiKey) (any, []string, error) { args := m.Called(that) var err error if args.Get(2) != nil { diff --git a/pkg/ring/kv/dynamodb/dynamodb.go b/pkg/ring/kv/dynamodb/dynamodb.go index 57246497016..d9d63a565e6 100644 --- a/pkg/ring/kv/dynamodb/dynamodb.go +++ b/pkg/ring/kv/dynamodb/dynamodb.go @@ -213,7 +213,7 @@ func (kv dynamodbKV) Batch(ctx context.Context, put map[dynamodbKey]dynamodbItem } writeRequestsSlices := make([][]*dynamodb.TransactWriteItem, int(math.Ceil(float64(writeRequestSize)/float64(DdbBatchSizeLimit)))) - for i := 0; i < len(writeRequestsSlices); i++ { + for i := range writeRequestsSlices { writeRequestsSlices[i] = make([]*dynamodb.TransactWriteItem, 0, DdbBatchSizeLimit) } currIdx := 0 diff --git a/pkg/ring/kv/etcd/etcd.go b/pkg/ring/kv/etcd/etcd.go index ca7dcf050a2..1152bff5f75 100644 --- a/pkg/ring/kv/etcd/etcd.go +++ b/pkg/ring/kv/etcd/etcd.go @@ -122,7 +122,7 @@ func New(cfg Config, codec codec.Codec, logger log.Logger) (*Client, error) { } // CAS implements kv.Client. -func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error { +func (c *Client) CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error { var revision int64 var lastErr error @@ -137,7 +137,7 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou continue } - var intermediate interface{} + var intermediate any if len(resp.Kvs) > 0 { intermediate, err = c.codec.Decode(resp.Kvs[0].Value) if err != nil { @@ -195,7 +195,7 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou } // WatchKey implements kv.Client. -func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) bool) { +func (c *Client) WatchKey(ctx context.Context, key string, f func(any) bool) { backoff := backoff.New(ctx, backoff.Config{ MinBackoff: 1 * time.Second, MaxBackoff: 1 * time.Minute, @@ -236,7 +236,7 @@ outer: } // WatchPrefix implements kv.Client. 
-func (c *Client) WatchPrefix(ctx context.Context, key string, f func(string, interface{}) bool) { +func (c *Client) WatchPrefix(ctx context.Context, key string, f func(string, any) bool) { backoff := backoff.New(ctx, backoff.Config{ MinBackoff: 1 * time.Second, MaxBackoff: 1 * time.Minute, @@ -298,7 +298,7 @@ func (c *Client) List(ctx context.Context, prefix string) ([]string, error) { } // Get implements kv.Client. -func (c *Client) Get(ctx context.Context, key string) (interface{}, error) { +func (c *Client) Get(ctx context.Context, key string) (any, error) { opsCtx, cancel := c.opsContext(ctx) defer cancel() diff --git a/pkg/ring/kv/kv_test.go b/pkg/ring/kv/kv_test.go index 37a51ae0da1..e5476e14ee2 100644 --- a/pkg/ring/kv/kv_test.go +++ b/pkg/ring/kv/kv_test.go @@ -52,14 +52,14 @@ var ( func TestCAS(t *testing.T) { withFixtures(t, func(t *testing.T, client Client) { // Blindly set key to "0". - err := client.CAS(ctx, key, func(in interface{}) (interface{}, bool, error) { + err := client.CAS(ctx, key, func(in any) (any, bool, error) { return "0", true, nil }) require.NoError(t, err) // Swap key to i+1 iff its i. - for i := 0; i < 10; i++ { - err = client.CAS(ctx, key, func(in interface{}) (interface{}, bool, error) { + for i := range 10 { + err = client.CAS(ctx, key, func(in any) (any, bool, error) { require.EqualValues(t, strconv.Itoa(i), in) return strconv.Itoa(i + 1), true, nil }) @@ -78,13 +78,13 @@ func TestCAS(t *testing.T) { func TestNilCAS(t *testing.T) { withFixtures(t, func(t *testing.T, client Client) { // Blindly set key to "0". - err := client.CAS(ctx, key, func(in interface{}) (interface{}, bool, error) { + err := client.CAS(ctx, key, func(in any) (any, bool, error) { return "0", true, nil }) require.NoError(t, err) // Ensure key is "0" and don't set it. - err = client.CAS(ctx, key, func(in interface{}) (interface{}, bool, error) { + err = client.CAS(ctx, key, func(in any) (any, bool, error) { require.EqualValues(t, "0", in) return nil, false, nil }) @@ -113,7 +113,7 @@ func TestWatchKey(t *testing.T) { // Start watching before we even start generating values. // Values will be buffered in the channel. t.Log("Watching in background", "key", key) - client.WatchKey(ctx, key, func(value interface{}) bool { + client.WatchKey(ctx, key, func(value any) bool { observedValuesCh <- value.(string) return true }) @@ -121,11 +121,11 @@ func TestWatchKey(t *testing.T) { // update value for the key go func() { - for i := 0; i < max; i++ { + for i := range max { // Start with sleeping, so that watching client see empty KV store at the beginning. time.Sleep(sleep) - err := client.CAS(ctx, key, func(in interface{}) (out interface{}, retry bool, err error) { + err := client.CAS(ctx, key, func(in any) (out any, retry bool, err error) { return fmt.Sprintf("%d", i), true, nil }) @@ -193,7 +193,7 @@ func TestWatchPrefix(t *testing.T) { defer wg.Done() // start watching before we even start generating values. 
values will be buffered - client.WatchPrefix(ctx, prefix, func(key string, val interface{}) bool { + client.WatchPrefix(ctx, prefix, func(key string, val any) bool { observedKeysCh <- key return true }) @@ -208,7 +208,7 @@ func TestWatchPrefix(t *testing.T) { time.Sleep(sleep) key := fmt.Sprintf("%s%d", p, i) - err := client.CAS(ctx, key, func(in interface{}) (out interface{}, retry bool, err error) { + err := client.CAS(ctx, key, func(in any) (out any, retry bool, err error) { return key, true, nil }) @@ -247,7 +247,7 @@ func TestWatchPrefix(t *testing.T) { wg.Wait() // verify that each key was reported once, and keys outside prefix were not reported - for i := 0; i < max; i++ { + for i := range max { key := fmt.Sprintf("%s%d", prefix, i) if observedKeys[key] != 1 { @@ -268,7 +268,7 @@ func TestList(t *testing.T) { withFixtures(t, func(t *testing.T, client Client) { for _, key := range keysToCreate { - err := client.CAS(context.Background(), key, func(in interface{}) (out interface{}, retry bool, err error) { + err := client.CAS(context.Background(), key, func(in any) (out any, retry bool, err error) { return key, false, nil }) require.NoError(t, err) diff --git a/pkg/ring/kv/memberlist/broadcast.go b/pkg/ring/kv/memberlist/broadcast.go index 6657b73a51d..d567c2e5edf 100644 --- a/pkg/ring/kv/memberlist/broadcast.go +++ b/pkg/ring/kv/memberlist/broadcast.go @@ -2,6 +2,7 @@ package memberlist import ( "fmt" + "slices" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -28,13 +29,7 @@ func (r ringBroadcast) Invalidates(old memberlist.Broadcast) bool { // and this broadcast has resulted in a newer ring update, we can invalidate the old value for _, oldName := range oldb.content { - found := false - for _, newName := range r.content { - if oldName == newName { - found = true - break - } - } + found := slices.Contains(r.content, oldName) if !found { return false diff --git a/pkg/ring/kv/memberlist/kv_init_service.go b/pkg/ring/kv/memberlist/kv_init_service.go index c3350e58451..7e0b1acb047 100644 --- a/pkg/ring/kv/memberlist/kv_init_service.go +++ b/pkg/ring/kv/memberlist/kv_init_service.go @@ -224,7 +224,7 @@ func viewKey(w http.ResponseWriter, store map[string]valueDesc, key string, form formatValue(w, store[key].value, format) } -func formatValue(w http.ResponseWriter, val interface{}, format string) { +func formatValue(w http.ResponseWriter, val any, format string) { w.WriteHeader(200) w.Header().Add("content-type", "text/plain") diff --git a/pkg/ring/kv/memberlist/memberlist_client.go b/pkg/ring/kv/memberlist/memberlist_client.go index 69f3bfd5ba6..206157c284a 100644 --- a/pkg/ring/kv/memberlist/memberlist_client.go +++ b/pkg/ring/kv/memberlist/memberlist_client.go @@ -60,7 +60,7 @@ func (c *Client) List(ctx context.Context, prefix string) ([]string, error) { } // Get is part of kv.Client interface. 
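The ringBroadcast.Invalidates hunk above collapses the hand-rolled "scan and set a found flag" loop into slices.Contains, available in the standard library since Go 1.21. A standalone sketch of the same pattern, with illustrative data:

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        content := []string{"ing-1", "ing-2"}
        // Equivalent to looping over content and flipping a found flag.
        fmt.Println(slices.Contains(content, "ing-2")) // true
        fmt.Println(slices.Contains(content, "ing-9")) // false
    }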
-func (c *Client) Get(ctx context.Context, key string) (interface{}, error) { +func (c *Client) Get(ctx context.Context, key string) (any, error) { err := c.awaitKVRunningOrStopping(ctx) if err != nil { return nil, err @@ -75,7 +75,7 @@ func (c *Client) Delete(ctx context.Context, key string) error { } // CAS is part of kv.Client interface -func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error { +func (c *Client) CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error { err := c.awaitKVRunningOrStopping(ctx) if err != nil { return err @@ -85,7 +85,7 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou } // WatchKey is part of kv.Client interface. -func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) bool) { +func (c *Client) WatchKey(ctx context.Context, key string, f func(any) bool) { err := c.awaitKVRunningOrStopping(ctx) if err != nil { return @@ -96,7 +96,7 @@ func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) b // WatchPrefix calls f whenever any value stored under prefix changes. // Part of kv.Client interface. -func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) { +func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, any) bool) { err := c.awaitKVRunningOrStopping(ctx) if err != nil { return @@ -658,13 +658,13 @@ func (m *KV) List(prefix string) []string { // Get returns current value associated with given key. // No communication with other nodes in the cluster is done here. -func (m *KV) Get(key string, codec codec.Codec) (interface{}, error) { +func (m *KV) Get(key string, codec codec.Codec) (any, error) { val, _, err := m.get(key, codec) return val, err } // Returns current value with removed tombstones. -func (m *KV) get(key string, codec codec.Codec) (out interface{}, version uint, err error) { +func (m *KV) get(key string, codec codec.Codec) (out any, version uint, err error) { m.storeMu.Lock() v := m.store[key].Clone() m.storeMu.Unlock() @@ -682,7 +682,7 @@ func (m *KV) get(key string, codec codec.Codec) (out interface{}, version uint, // latest value. Notifications that arrive while 'f' is running are coalesced into one subsequent 'f' call. // // Watching ends when 'f' returns false, context is done, or this client is shut down. -func (m *KV) WatchKey(ctx context.Context, key string, codec codec.Codec, f func(interface{}) bool) { +func (m *KV) WatchKey(ctx context.Context, key string, codec codec.Codec, f func(any) bool) { // keep one extra notification, to avoid missing notification if we're busy running the function w := make(chan string, 1) @@ -729,7 +729,7 @@ func (m *KV) WatchKey(ctx context.Context, key string, codec codec.Codec, f func // some notifications may be lost. // // Watching ends when 'f' returns false, context is done, or this client is shut down. -func (m *KV) WatchPrefix(ctx context.Context, prefix string, codec codec.Codec, f func(string, interface{}) bool) { +func (m *KV) WatchPrefix(ctx context.Context, prefix string, codec codec.Codec, f func(string, any) bool) { // we use bigger buffer here, since keys are interesting and we don't want to lose them. w := make(chan string, 16) @@ -828,7 +828,7 @@ func (m *KV) notifyWatchers(key string) { // KV store, and change is broadcast to cluster peers. Merge function is called with CAS flag on, so that it can // detect removals. 
If Merge doesn't result in any change (returns nil), then operation fails and is retried again. // After too many failed retries, this method returns error. -func (m *KV) CAS(ctx context.Context, key string, codec codec.Codec, f func(in interface{}) (out interface{}, retry bool, err error)) error { +func (m *KV) CAS(ctx context.Context, key string, codec codec.Codec, f func(in any) (out any, retry bool, err error)) error { var lastError error outer: @@ -885,7 +885,7 @@ outer: // returns change, error (or nil, if CAS succeeded), and whether to retry or not. // returns errNoChangeDetected if merge failed to detect change in f's output. -func (m *KV) trySingleCas(key string, codec codec.Codec, f func(in interface{}) (out interface{}, retry bool, err error)) (Mergeable, uint, bool, error) { +func (m *KV) trySingleCas(key string, codec codec.Codec, f func(in any) (out any, retry bool, err error)) (Mergeable, uint, bool, error) { val, ver, err := m.get(key, codec) if err != nil { return nil, 0, false, fmt.Errorf("failed to get value: %v", err) diff --git a/pkg/ring/kv/memberlist/memberlist_client_test.go b/pkg/ring/kv/memberlist/memberlist_client_test.go index fbca1924c45..ed093670d10 100644 --- a/pkg/ring/kv/memberlist/memberlist_client_test.go +++ b/pkg/ring/kv/memberlist/memberlist_client_test.go @@ -6,6 +6,7 @@ import ( "encoding/gob" "errors" "fmt" + "maps" "math" "math/rand" "net" @@ -111,7 +112,7 @@ func (m member) clone() member { return out } -func (d *data) Clone() interface{} { +func (d *data) Clone() any { out := &data{ Members: make(map[string]member, len(d.Members)), } @@ -137,22 +138,22 @@ func (d dataCodec) CodecID() string { return "testDataCodec" } -func (d dataCodec) Decode(b []byte) (interface{}, error) { +func (d dataCodec) Decode(b []byte) (any, error) { dec := gob.NewDecoder(bytes.NewBuffer(b)) out := &data{} err := dec.Decode(out) return out, err } -func (d dataCodec) DecodeMultiKey(map[string][]byte) (interface{}, error) { +func (d dataCodec) DecodeMultiKey(map[string][]byte) (any, error) { return nil, errors.New("dataCodec does not support DecodeMultiKey") } -func (d dataCodec) EncodeMultiKey(interface{}) (map[string][]byte, error) { +func (d dataCodec) EncodeMultiKey(any) (map[string][]byte, error) { return nil, errors.New("dataCodec does not support EncodeMultiKey") } -func (d dataCodec) Encode(val interface{}) ([]byte, error) { +func (d dataCodec) Encode(val any) ([]byte, error) { buf := bytes.Buffer{} enc := gob.NewEncoder(&buf) err := enc.Encode(val) @@ -196,7 +197,7 @@ func updateFn(name string) func(*data) (*data, bool, error) { } } -func get(t *testing.T, kv *Client, key string) interface{} { +func get(t *testing.T, kv *Client, key string) any { val, err := kv.Get(context.Background(), key) if err != nil { t.Fatalf("Failed to get value for key %s: %v", key, err) @@ -227,7 +228,7 @@ func cas(t *testing.T, kv *Client, key string, updateFn func(*data) (*data, bool func casWithErr(ctx context.Context, t *testing.T, kv *Client, key string, updateFn func(*data) (*data, bool, error)) error { t.Helper() - fn := func(in interface{}) (out interface{}, retry bool, err error) { + fn := func(in any) (out any, retry bool, err error) { var r *data if in != nil { r = in.(*data) @@ -469,7 +470,7 @@ func TestMultipleCAS(t *testing.T) { const members = 10 const namePattern = "Member-%d" - for i := 0; i < members; i++ { + for i := range members { wg.Add(1) go func(name string) { defer wg.Done() @@ -487,7 +488,7 @@ func TestMultipleCAS(t *testing.T) { r := getData(t, kv, "test") 
require.True(t, r != nil, "nil ring") - for i := 0; i < members; i++ { + for i := range members { n := fmt.Sprintf(namePattern, i) if r.Members[n].State != ACTIVE { @@ -498,7 +499,7 @@ func TestMultipleCAS(t *testing.T) { // Make all members leave start = make(chan struct{}) - for i := 0; i < members; i++ { + for i := range members { wg.Add(1) go func(name string) { defer wg.Done() @@ -518,7 +519,7 @@ func TestMultipleCAS(t *testing.T) { r = getData(t, kv, "test") require.True(t, r != nil, "nil ring") - for i := 0; i < members; i++ { + for i := range members { n := fmt.Sprintf(namePattern, i) if r.Members[n].State != LEFT { @@ -540,7 +541,7 @@ func TestMultipleClients(t *testing.T) { port := 0 - for i := 0; i < members; i++ { + for i := range members { id := fmt.Sprintf("Member-%d", i) var cfg KVConfig flagext.DefaultValues(&cfg) @@ -581,7 +582,7 @@ func TestMultipleClients(t *testing.T) { firstKv := clients[0] ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) updates := 0 - firstKv.WatchKey(ctx, key, func(in interface{}) bool { + firstKv.WatchKey(ctx, key, func(in any) bool { updates++ r := in.(*data) @@ -610,7 +611,7 @@ func TestMultipleClients(t *testing.T) { // And same tokens. allTokens := []uint32(nil) - for i := 0; i < members; i++ { + for i := range members { kv := clients[i] r := getData(t, kv, key) @@ -743,7 +744,7 @@ func TestJoinMembersWithRetryBackoff(t *testing.T) { firstKv := clients[0] ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) observedMembers := 0 - firstKv.WatchKey(ctx, key, func(in interface{}) bool { + firstKv.WatchKey(ctx, key, func(in any) bool { r := in.(*data) observedMembers = len(r.Members) @@ -823,7 +824,7 @@ func TestMemberlistJoinOnStarting(t *testing.T) { mkv2 := NewKV(cfg2, log.NewNopLogger(), &dnsProviderMock{}, prometheus.NewPedanticRegistry()) require.NoError(t, mkv2.starting(context.Background())) - membersFunc := func() interface{} { + membersFunc := func() any { return mkv2.memberlist.NumMembers() } @@ -832,7 +833,7 @@ func TestMemberlistJoinOnStarting(t *testing.T) { func getFreePorts(count int) ([]int, error) { var ports []int - for i := 0; i < count; i++ { + for range count { addr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:0") if err != nil { return nil, err @@ -945,11 +946,9 @@ func (dc distributedCounter) RemoveTombstones(limit time.Time) (_, _ int) { return } -func (dc distributedCounter) Clone() interface{} { +func (dc distributedCounter) Clone() any { out := make(distributedCounter, len(dc)) - for k, v := range dc { - out[k] = v - } + maps.Copy(out, dc) return out } @@ -959,25 +958,25 @@ func (d distributedCounterCodec) CodecID() string { return "distributedCounter" } -func (d distributedCounterCodec) Decode(b []byte) (interface{}, error) { +func (d distributedCounterCodec) Decode(b []byte) (any, error) { dec := gob.NewDecoder(bytes.NewBuffer(b)) out := &distributedCounter{} err := dec.Decode(out) return *out, err } -func (d distributedCounterCodec) Encode(val interface{}) ([]byte, error) { +func (d distributedCounterCodec) Encode(val any) ([]byte, error) { buf := bytes.Buffer{} enc := gob.NewEncoder(&buf) err := enc.Encode(val) return buf.Bytes(), err } -func (d distributedCounterCodec) DecodeMultiKey(map[string][]byte) (interface{}, error) { +func (d distributedCounterCodec) DecodeMultiKey(map[string][]byte) (any, error) { return nil, errors.New("distributedCounterCodec does not support DecodeMultiKey") } -func (d distributedCounterCodec) EncodeMultiKey(interface{}) (map[string][]byte, 
error) { +func (d distributedCounterCodec) EncodeMultiKey(any) (map[string][]byte, error) { return nil, errors.New("distributedCounterCodec does not support EncodeMultiKey") } @@ -1006,7 +1005,7 @@ func TestMultipleCodecs(t *testing.T) { kv2, err := NewClient(mkv1, distributedCounterCodec{}) require.NoError(t, err) - err = kv1.CAS(context.Background(), "data", func(in interface{}) (out interface{}, retry bool, err error) { + err = kv1.CAS(context.Background(), "data", func(in any) (out any, retry bool, err error) { var d *data if in != nil { d = in.(*data) @@ -1025,7 +1024,7 @@ func TestMultipleCodecs(t *testing.T) { }) require.NoError(t, err) - err = kv2.CAS(context.Background(), "counter", func(in interface{}) (out interface{}, retry bool, err error) { + err = kv2.CAS(context.Background(), "counter", func(in any) (out any, retry bool, err error) { var dc distributedCounter if in != nil { dc = in.(distributedCounter) @@ -1099,7 +1098,7 @@ func TestRejoin(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), mkv2)) defer services.StopAndAwaitTerminated(context.Background(), mkv2) //nolint:errcheck - membersFunc := func() interface{} { + membersFunc := func() any { return mkv2.memberlist.NumMembers() } @@ -1156,7 +1155,7 @@ func TestNotifyMsgResendsOnlyChanges(t *testing.T) { now := time.Now() - require.NoError(t, client.CAS(context.Background(), key, func(in interface{}) (out interface{}, retry bool, err error) { + require.NoError(t, client.CAS(context.Background(), key, func(in any) (out any, retry bool, err error) { d := getOrCreateData(in) d.Members["a"] = member{Timestamp: now.Unix(), State: JOINING} d.Members["b"] = member{Timestamp: now.Unix(), State: JOINING} @@ -1299,7 +1298,7 @@ func decodeDataFromMarshalledKeyValuePair(t *testing.T, marshalledKVP []byte, ke return d } -func marshalKeyValuePair(t *testing.T, key string, codec codec.Codec, value interface{}) []byte { +func marshalKeyValuePair(t *testing.T, key string, codec codec.Codec, value any) []byte { data, err := codec.Encode(value) require.NoError(t, err) @@ -1309,7 +1308,7 @@ func marshalKeyValuePair(t *testing.T, key string, codec codec.Codec, value inte return data } -func getOrCreateData(in interface{}) *data { +func getOrCreateData(in any) *data { // Modify value that was passed as a parameter. // Client takes care of concurrent modifications. r, ok := in.(*data) @@ -1320,7 +1319,7 @@ func getOrCreateData(in interface{}) *data { } // poll repeatedly evaluates condition until we either timeout, or it succeeds. 
-func poll(t testing.TB, d time.Duration, want interface{}, have func() interface{}) { +func poll(t testing.TB, d time.Duration, want any, have func() any) { t.Helper() deadline := time.Now().Add(d) @@ -1340,7 +1339,7 @@ func poll(t testing.TB, d time.Duration, want interface{}, have func() interface type testLogger struct { } -func (l testLogger) Log(keyvals ...interface{}) error { +func (l testLogger) Log(keyvals ...any) error { return nil } diff --git a/pkg/ring/kv/memberlist/memberlist_logger.go b/pkg/ring/kv/memberlist/memberlist_logger.go index 30a28d06856..4574216b98b 100644 --- a/pkg/ring/kv/memberlist/memberlist_logger.go +++ b/pkg/ring/kv/memberlist/memberlist_logger.go @@ -30,7 +30,7 @@ func newMemberlistLoggerAdapter(logger log.Logger, logTimestamp bool) io.Writer func (a loggerAdapter) Write(p []byte) (int, error) { result := subexps(p) - keyvals := []interface{}{} + keyvals := []any{} var timestamp string if date, ok := result["date"]; ok && date != "" { timestamp = date diff --git a/pkg/ring/kv/metrics.go b/pkg/ring/kv/metrics.go index 38ec3b59cd6..30ed8ff4aa3 100644 --- a/pkg/ring/kv/metrics.go +++ b/pkg/ring/kv/metrics.go @@ -71,8 +71,8 @@ func (m metrics) List(ctx context.Context, prefix string) ([]string, error) { return result, err } -func (m metrics) Get(ctx context.Context, key string) (interface{}, error) { - var result interface{} +func (m metrics) Get(ctx context.Context, key string) (any, error) { + var result any err := instrument.CollectedRequest(ctx, "GET", m.requestDuration, instrument.ErrorCode, func(ctx context.Context) error { var err error result, err = m.c.Get(ctx, key) @@ -88,20 +88,20 @@ func (m metrics) Delete(ctx context.Context, key string) error { return err } -func (m metrics) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error { +func (m metrics) CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error { return instrument.CollectedRequest(ctx, "CAS", m.requestDuration, getCasErrorCode, func(ctx context.Context) error { return m.c.CAS(ctx, key, f) }) } -func (m metrics) WatchKey(ctx context.Context, key string, f func(interface{}) bool) { +func (m metrics) WatchKey(ctx context.Context, key string, f func(any) bool) { _ = instrument.CollectedRequest(ctx, "WatchKey", m.requestDuration, instrument.ErrorCode, func(ctx context.Context) error { m.c.WatchKey(ctx, key, f) return nil }) } -func (m metrics) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) { +func (m metrics) WatchPrefix(ctx context.Context, prefix string, f func(string, any) bool) { _ = instrument.CollectedRequest(ctx, "WatchPrefix", m.requestDuration, instrument.ErrorCode, func(ctx context.Context) error { m.c.WatchPrefix(ctx, prefix, f) return nil diff --git a/pkg/ring/kv/mock.go b/pkg/ring/kv/mock.go index cbe23106a8e..f889be60d81 100644 --- a/pkg/ring/kv/mock.go +++ b/pkg/ring/kv/mock.go @@ -21,7 +21,7 @@ func (m mockClient) List(ctx context.Context, prefix string) ([]string, error) { return []string{}, nil } -func (m mockClient) Get(ctx context.Context, key string) (interface{}, error) { +func (m mockClient) Get(ctx context.Context, key string) (any, error) { return "", nil } @@ -29,14 +29,14 @@ func (m mockClient) Delete(ctx context.Context, key string) error { return nil } -func (m mockClient) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error { +func (m mockClient) CAS(ctx context.Context, key string, f 
func(in any) (out any, retry bool, err error)) error { return nil } -func (m mockClient) WatchKey(ctx context.Context, key string, f func(interface{}) bool) { +func (m mockClient) WatchKey(ctx context.Context, key string, f func(any) bool) { } -func (m mockClient) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) { +func (m mockClient) WatchPrefix(ctx context.Context, prefix string, f func(string, any) bool) { } func (m mockClient) LastUpdateTime(key string) time.Time { diff --git a/pkg/ring/kv/multi.go b/pkg/ring/kv/multi.go index e4ac994d769..98c2a04b65a 100644 --- a/pkg/ring/kv/multi.go +++ b/pkg/ring/kv/multi.go @@ -290,7 +290,7 @@ func (m *MultiClient) List(ctx context.Context, prefix string) ([]string, error) } // Get is a part of kv.Client interface. -func (m *MultiClient) Get(ctx context.Context, key string) (interface{}, error) { +func (m *MultiClient) Get(ctx context.Context, key string) (any, error) { _, kv := m.getPrimaryClient() return kv.client.Get(ctx, key) } @@ -302,11 +302,11 @@ func (m *MultiClient) Delete(ctx context.Context, key string) error { } // CAS is a part of kv.Client interface. -func (m *MultiClient) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error { +func (m *MultiClient) CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error { _, kv := m.getPrimaryClient() - updatedValue := interface{}(nil) - err := kv.client.CAS(ctx, key, func(in interface{}) (interface{}, bool, error) { + updatedValue := any(nil) + err := kv.client.CAS(ctx, key, func(in any) (any, bool, error) { out, retry, err := f(in) updatedValue = out return out, retry, err @@ -320,7 +320,7 @@ func (m *MultiClient) CAS(ctx context.Context, key string, f func(in interface{} } // WatchKey is a part of kv.Client interface. -func (m *MultiClient) WatchKey(ctx context.Context, key string, f func(interface{}) bool) { +func (m *MultiClient) WatchKey(ctx context.Context, key string, f func(any) bool) { _ = m.runWithPrimaryClient(ctx, func(newCtx context.Context, primary kvclient) error { primary.client.WatchKey(newCtx, key, f) return newCtx.Err() @@ -328,7 +328,7 @@ func (m *MultiClient) WatchKey(ctx context.Context, key string, f func(interface } // WatchPrefix is a part of kv.Client interface. 
-func (m *MultiClient) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) { +func (m *MultiClient) WatchPrefix(ctx context.Context, prefix string, f func(string, any) bool) { _ = m.runWithPrimaryClient(ctx, func(newCtx context.Context, primary kvclient) error { primary.client.WatchPrefix(newCtx, prefix, f) return newCtx.Err() @@ -340,7 +340,7 @@ func (m *MultiClient) LastUpdateTime(key string) time.Time { return kv.client.LastUpdateTime(key) } -func (m *MultiClient) writeToSecondary(ctx context.Context, primary kvclient, key string, newValue interface{}) { +func (m *MultiClient) writeToSecondary(ctx context.Context, primary kvclient, key string, newValue any) { if m.mirrorTimeout > 0 { var cfn context.CancelFunc ctx, cfn = context.WithTimeout(ctx, m.mirrorTimeout) @@ -354,7 +354,7 @@ func (m *MultiClient) writeToSecondary(ctx context.Context, primary kvclient, ke } m.mirrorWritesCounter.Inc() - err := kvc.client.CAS(ctx, key, func(in interface{}) (out interface{}, retry bool, err error) { + err := kvc.client.CAS(ctx, key, func(in any) (out any, retry bool, err error) { // try once return newValue, false, nil }) diff --git a/pkg/ring/kv/prefix.go b/pkg/ring/kv/prefix.go index aba9b7a092c..d9406b4ff62 100644 --- a/pkg/ring/kv/prefix.go +++ b/pkg/ring/kv/prefix.go @@ -37,24 +37,24 @@ func (c *prefixedKVClient) List(ctx context.Context, prefix string) ([]string, e // CAS atomically modifies a value in a callback. If the value doesn't exist, // you'll get 'nil' as an argument to your callback. -func (c *prefixedKVClient) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error { +func (c *prefixedKVClient) CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error { return c.client.CAS(ctx, c.prefix+key, f) } // WatchKey watches a key. -func (c *prefixedKVClient) WatchKey(ctx context.Context, key string, f func(interface{}) bool) { +func (c *prefixedKVClient) WatchKey(ctx context.Context, key string, f func(any) bool) { c.client.WatchKey(ctx, c.prefix+key, f) } // WatchPrefix watches a prefix. For a prefix client it appends the prefix argument to the clients prefix. -func (c *prefixedKVClient) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) { - c.client.WatchPrefix(ctx, fmt.Sprintf("%s%s", c.prefix, prefix), func(k string, i interface{}) bool { +func (c *prefixedKVClient) WatchPrefix(ctx context.Context, prefix string, f func(string, any) bool) { + c.client.WatchPrefix(ctx, fmt.Sprintf("%s%s", c.prefix, prefix), func(k string, i any) bool { return f(strings.TrimPrefix(k, c.prefix), i) }) } // Get looks up a given object from its key. 
-func (c *prefixedKVClient) Get(ctx context.Context, key string) (interface{}, error) { +func (c *prefixedKVClient) Get(ctx context.Context, key string) (any, error) { return c.client.Get(ctx, c.prefix+key) } diff --git a/pkg/ring/lifecycler.go b/pkg/ring/lifecycler.go index 1a6812a9411..6038de2277b 100644 --- a/pkg/ring/lifecycler.go +++ b/pkg/ring/lifecycler.go @@ -446,7 +446,7 @@ func (i *Lifecycler) ClaimTokensFor(ctx context.Context, ingesterID string) erro fn := func() { var tokens Tokens - claimTokens := func(in interface{}) (out interface{}, retry bool, err error) { + claimTokens := func(in any) (out any, retry bool, err error) { ringDesc, ok := in.(*Desc) if !ok || ringDesc == nil { return nil, false, fmt.Errorf("cannot claim tokens in an empty ring") @@ -722,7 +722,7 @@ func (i *Lifecycler) initRing(ctx context.Context) (bool, error) { level.Info(i.logger).Log("msg", "not loading tokens from file, tokens file path is empty") } - err = i.KVStore.CAS(ctx, i.RingKey, func(in interface{}) (out interface{}, retry bool, err error) { + err = i.KVStore.CAS(ctx, i.RingKey, func(in any) (out any, retry bool, err error) { if in == nil { ringDesc = NewDesc() } else { @@ -821,7 +821,7 @@ func (i *Lifecycler) RenewTokens(ratio float64, ctx context.Context) { if ratio > 1 { ratio = 1 } - err := i.KVStore.CAS(ctx, i.RingKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := i.KVStore.CAS(ctx, i.RingKey, func(in any) (out any, retry bool, err error) { if in == nil { return in, false, nil } @@ -837,7 +837,7 @@ func (i *Lifecycler) RenewTokens(ratio float64, ctx context.Context) { ringTokens, _ := ringDesc.TokensFor(i.ID) // Removing random tokens - for i := 0; i < tokensToBeRenewed; i++ { + for range tokensToBeRenewed { if len(ringTokens) == 0 { break } @@ -869,7 +869,7 @@ func (i *Lifecycler) RenewTokens(ratio float64, ctx context.Context) { func (i *Lifecycler) verifyTokens(ctx context.Context) bool { result := false - err := i.KVStore.CAS(ctx, i.RingKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := i.KVStore.CAS(ctx, i.RingKey, func(in any) (out any, retry bool, err error) { var ringDesc *Desc if in == nil { ringDesc = NewDesc() @@ -920,7 +920,7 @@ func (i *Lifecycler) compareTokens(fromRing Tokens) bool { return false } - for i := 0; i < len(tokens); i++ { + for i := range tokens { if tokens[i] != fromRing[i] { return false } @@ -932,7 +932,7 @@ func (i *Lifecycler) compareTokens(fromRing Tokens) bool { func (i *Lifecycler) autoJoin(ctx context.Context, targetState InstanceState, alreadyInRing bool) error { var ringDesc *Desc - err := i.KVStore.CAS(ctx, i.RingKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := i.KVStore.CAS(ctx, i.RingKey, func(in any) (out any, retry bool, err error) { if in == nil { ringDesc = NewDesc() } else { @@ -987,7 +987,7 @@ func (i *Lifecycler) autoJoin(ctx context.Context, targetState InstanceState, al func (i *Lifecycler) updateConsul(ctx context.Context) error { var ringDesc *Desc - err := i.KVStore.CAS(ctx, i.RingKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := i.KVStore.CAS(ctx, i.RingKey, func(in any) (out any, retry bool, err error) { if in == nil { ringDesc = NewDesc() } else { @@ -1121,7 +1121,7 @@ func (i *Lifecycler) processShutdown(ctx context.Context) { func (i *Lifecycler) unregister(ctx context.Context) error { level.Debug(i.logger).Log("msg", "unregistering instance from ring", "ring", i.RingName) - return i.KVStore.CAS(ctx, i.RingKey, func(in 
interface{}) (out interface{}, retry bool, err error) { + return i.KVStore.CAS(ctx, i.RingKey, func(in any) (out any, retry bool, err error) { if in == nil { return nil, false, fmt.Errorf("found empty ring when trying to unregister") } diff --git a/pkg/ring/lifecycler_test.go b/pkg/ring/lifecycler_test.go index 6778e053eb3..b7dc0afb3ea 100644 --- a/pkg/ring/lifecycler_test.go +++ b/pkg/ring/lifecycler_test.go @@ -47,7 +47,7 @@ func testLifecyclerConfigWithAddr(ringConfig Config, id string, addr string) Lif return l } -func checkNormalised(d interface{}, id string) bool { +func checkNormalised(d any, id string) bool { desc, ok := d.(*Desc) return ok && len(desc.Ingesters) == 1 && @@ -117,7 +117,7 @@ func TestLifecycler_RenewTokens(t *testing.T) { require.Len(t, newTokens, 512) require.IsIncreasing(t, newTokens) diff := 0 - for i := 0; i < len(originalTokens); i++ { + for i := range originalTokens { if !slices.Contains(originalTokens, newTokens[i]) { diff++ } @@ -203,7 +203,7 @@ func TestLifecycler_HealthyInstancesCount(t *testing.T) { defer services.StopAndAwaitTerminated(ctx, lifecycler1) // nolint:errcheck // Assert the first ingester joined the ring - test.Poll(t, 1000*time.Millisecond, true, func() interface{} { + test.Poll(t, 1000*time.Millisecond, true, func() any { return lifecycler1.HealthyInstancesCount() == 1 }) @@ -220,12 +220,12 @@ func TestLifecycler_HealthyInstancesCount(t *testing.T) { defer services.StopAndAwaitTerminated(ctx, lifecycler2) // nolint:errcheck // Assert the second ingester joined the ring - test.Poll(t, 1000*time.Millisecond, true, func() interface{} { + test.Poll(t, 1000*time.Millisecond, true, func() any { return lifecycler2.HealthyInstancesCount() == 2 }) // Assert the first ingester count is updated - test.Poll(t, 1000*time.Millisecond, true, func() interface{} { + test.Poll(t, 1000*time.Millisecond, true, func() any { return lifecycler1.HealthyInstancesCount() == 2 }) } @@ -265,7 +265,7 @@ func TestLifecycler_ZonesCount(t *testing.T) { defer services.StopAndAwaitTerminated(ctx, lifecycler) // nolint:errcheck // Wait until joined. - test.Poll(t, time.Second, idx+1, func() interface{} { + test.Poll(t, time.Second, idx+1, func() any { return lifecycler.HealthyInstancesCount() }) @@ -288,7 +288,7 @@ func TestLifecycler_NilFlushTransferer(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), lifecycler)) // Ensure the lifecycler joined the ring - test.Poll(t, time.Second, 1, func() interface{} { + test.Poll(t, time.Second, 1, func() any { return lifecycler.HealthyInstancesCount() }) @@ -322,11 +322,11 @@ func TestLifecycler_TwoRingsWithDifferentKeysOnTheSameKVStore(t *testing.T) { // Ensure each lifecycler reports 1 healthy instance, because they're // in a different ring - test.Poll(t, time.Second, 1, func() interface{} { + test.Poll(t, time.Second, 1, func() any { return lifecycler1.HealthyInstancesCount() }) - test.Poll(t, time.Second, 1, func() interface{} { + test.Poll(t, time.Second, 1, func() any { return lifecycler2.HealthyInstancesCount() }) } @@ -358,7 +358,7 @@ func TestLifecycler_ShouldHandleInstanceAbruptlyRestarted(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), l1)) // Check this ingester joined, is active, and has one token. 
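The loop rewrites above (RenewTokens' token-removal loop, compareTokens) use Go 1.22's range-over-int form: for i := range n iterates i = 0..n-1, and the index variable can be dropped entirely when unused. A standalone sketch with an illustrative count:

    package main

    import "fmt"

    func main() {
        // Replaces: for i := 0; i < 3; i++ { ... }
        for i := range 3 {
            fmt.Println("index", i) // 0, 1, 2
        }
        // Replaces the same loop when the index is unused.
        for range 3 {
            fmt.Println("tick")
        }
    }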
- test.Poll(t, 1000*time.Millisecond, true, func() interface{} { + test.Poll(t, 1000*time.Millisecond, true, func() any { d, err := r.KVClient.Get(context.Background(), ringKey) require.NoError(t, err) return checkNormalised(d, "ing1") @@ -377,7 +377,7 @@ func TestLifecycler_ShouldHandleInstanceAbruptlyRestarted(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), l2)) // Check the new ingester picked up the same tokens and registered timestamp. - test.Poll(t, 1000*time.Millisecond, true, func() interface{} { + test.Poll(t, 1000*time.Millisecond, true, func() any { d, err := r.KVClient.Get(context.Background(), ringKey) require.NoError(t, err) @@ -389,11 +389,11 @@ func TestLifecycler_ShouldHandleInstanceAbruptlyRestarted(t *testing.T) { type MockClient struct { ListFunc func(ctx context.Context, prefix string) ([]string, error) - GetFunc func(ctx context.Context, key string) (interface{}, error) + GetFunc func(ctx context.Context, key string) (any, error) DeleteFunc func(ctx context.Context, key string) error - CASFunc func(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error - WatchKeyFunc func(ctx context.Context, key string, f func(interface{}) bool) - WatchPrefixFunc func(ctx context.Context, prefix string, f func(string, interface{}) bool) + CASFunc func(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error + WatchKeyFunc func(ctx context.Context, key string, f func(any) bool) + WatchPrefixFunc func(ctx context.Context, prefix string, f func(string, any) bool) } func (m *MockClient) List(ctx context.Context, prefix string) ([]string, error) { @@ -404,7 +404,7 @@ func (m *MockClient) List(ctx context.Context, prefix string) ([]string, error) return nil, nil } -func (m *MockClient) Get(ctx context.Context, key string) (interface{}, error) { +func (m *MockClient) Get(ctx context.Context, key string) (any, error) { if m.GetFunc != nil { return m.GetFunc(ctx, key) } @@ -420,7 +420,7 @@ func (m *MockClient) Delete(ctx context.Context, key string) error { return nil } -func (m *MockClient) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error { +func (m *MockClient) CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error { if m.CASFunc != nil { return m.CASFunc(ctx, key, f) } @@ -428,13 +428,13 @@ func (m *MockClient) CAS(ctx context.Context, key string, f func(in interface{}) return nil } -func (m *MockClient) WatchKey(ctx context.Context, key string, f func(interface{}) bool) { +func (m *MockClient) WatchKey(ctx context.Context, key string, f func(any) bool) { if m.WatchKeyFunc != nil { m.WatchKeyFunc(ctx, key, f) } } -func (m *MockClient) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) { +func (m *MockClient) WatchPrefix(ctx context.Context, prefix string, f func(string, any) bool) { if m.WatchPrefixFunc != nil { m.WatchPrefixFunc(ctx, prefix, f) } @@ -525,7 +525,7 @@ func TestCheckReady_MinReadyDuration(t *testing.T) { assert.NoError(t, l.CheckReady(ctx)) } else { // Poll the readiness check until ready and measure how much time it takes. 
- test.Poll(t, 3*time.Second, nil, func() interface{} { + test.Poll(t, 3*time.Second, nil, func() any { return l.CheckReady(ctx) }) @@ -604,7 +604,7 @@ func TestCheckReady_CheckRingHealth(t *testing.T) { waitRingInstance(t, 3*time.Second, l2, func(instance InstanceDesc) error { return nil }) // Poll the readiness check until ready and measure how much time it takes. - test.Poll(t, 5*time.Second, nil, func() interface{} { + test.Poll(t, 5*time.Second, nil, func() any { return l1.CheckReady(ctx) }) @@ -635,7 +635,7 @@ func TestRestartIngester_DisabledHeartbeat_unregister_on_shutdown_false(t *testi // poll function waits for a condition and returning actual state of the ingesters after the condition succeed. poll := func(condition func(*Desc) bool) map[string]InstanceDesc { var ingesters map[string]InstanceDesc - test.Poll(t, 5*time.Second, true, func() interface{} { + test.Poll(t, 5*time.Second, true, func() any { d, err := r.KVClient.Get(context.Background(), ringKey) require.NoError(t, err) @@ -695,7 +695,7 @@ func TestRestartIngester_DisabledHeartbeat_unregister_on_shutdown_false(t *testi require.NoError(t, services.StopAndAwaitTerminated(context.Background(), l2)) // Simulate ingester2 crash on startup and left the ring with JOINING state - err = r.KVClient.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err = r.KVClient.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) { desc, ok := in.(*Desc) require.Equal(t, true, ok) ingester2Desc := desc.Ingesters["ing2"] @@ -709,7 +709,7 @@ func TestRestartIngester_DisabledHeartbeat_unregister_on_shutdown_false(t *testi require.NoError(t, services.StopAndAwaitTerminated(context.Background(), l2)) // Simulate ingester2 crash on startup and left the ring with PENDING state - err = r.KVClient.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err = r.KVClient.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) { desc, ok := in.(*Desc) require.Equal(t, true, ok) ingester2Desc := desc.Ingesters["ing2"] @@ -757,7 +757,7 @@ func TestRestartIngester_READONLY(t *testing.T) { // poll function waits for a condition and returning actual state of the ingesters after the condition succeed. poll := func(condition func(*Desc) bool) map[string]InstanceDesc { var ingesters map[string]InstanceDesc - test.Poll(t, 5*time.Second, true, func() interface{} { + test.Poll(t, 5*time.Second, true, func() any { d, err := r.KVClient.Get(context.Background(), ringKey) require.NoError(t, err) @@ -854,7 +854,7 @@ func TestTokenFileOnDisk(t *testing.T) { // Check this ingester joined, is active, and has 512 token. var expTokens []uint32 - test.Poll(t, 1000*time.Millisecond, true, func() interface{} { + test.Poll(t, 1000*time.Millisecond, true, func() any { d, err := r.KVClient.Get(context.Background(), ringKey) require.NoError(t, err) @@ -871,7 +871,7 @@ func TestTokenFileOnDisk(t *testing.T) { // Change state from ACTIVE to READONLY err = l1.ChangeState(context.Background(), READONLY) require.NoError(t, err) - test.Poll(t, 1000*time.Millisecond, true, func() interface{} { + test.Poll(t, 1000*time.Millisecond, true, func() any { d, err := r.KVClient.Get(context.Background(), ringKey) require.NoError(t, err) @@ -891,7 +891,7 @@ func TestTokenFileOnDisk(t *testing.T) { // Check this ingester joined, is active, and has 512 token. 
var actTokens []uint32 - test.Poll(t, 1000*time.Millisecond, true, func() interface{} { + test.Poll(t, 1000*time.Millisecond, true, func() any { d, err := r.KVClient.Get(context.Background(), ringKey) require.NoError(t, err) desc, ok := d.(*Desc) @@ -907,7 +907,7 @@ func TestTokenFileOnDisk(t *testing.T) { // Check for same tokens. slices.Sort(expTokens) slices.Sort(actTokens) - for i := 0; i < 512; i++ { + for range 512 { require.Equal(t, expTokens, actTokens) } } @@ -937,7 +937,7 @@ func TestRegisteredAtOnBackToActive(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), l1)) // Check this ingester joined, is active. - test.Poll(t, 1000*time.Millisecond, true, func() interface{} { + test.Poll(t, 1000*time.Millisecond, true, func() any { d, err := r.KVClient.Get(context.Background(), ringKey) require.NoError(t, err) @@ -957,7 +957,7 @@ func TestRegisteredAtOnBackToActive(t *testing.T) { // Change state from ACTIVE to READONLY err = l1.ChangeState(context.Background(), READONLY) require.NoError(t, err) - test.Poll(t, 1000*time.Millisecond, true, func() interface{} { + test.Poll(t, 1000*time.Millisecond, true, func() any { d, err := r.KVClient.Get(context.Background(), ringKey) require.NoError(t, err) @@ -972,7 +972,7 @@ func TestRegisteredAtOnBackToActive(t *testing.T) { // Change state from READONLY to ACTIVE err = l1.ChangeState(context.Background(), ACTIVE) require.NoError(t, err) - test.Poll(t, 1000*time.Millisecond, true, func() interface{} { + test.Poll(t, 1000*time.Millisecond, true, func() any { d, err := r.KVClient.Get(context.Background(), ringKey) require.NoError(t, err) @@ -1021,7 +1021,7 @@ func TestTokenFileOnDisk_WithoutAutoJoinOnStartup(t *testing.T) { // Check this ingester joined, is active, and has 512 token. var expTokens []uint32 - test.Poll(t, 1000*time.Millisecond, true, func() interface{} { + test.Poll(t, 1000*time.Millisecond, true, func() any { d, err := r.KVClient.Get(context.Background(), ringKey) require.NoError(t, err) @@ -1038,7 +1038,7 @@ func TestTokenFileOnDisk_WithoutAutoJoinOnStartup(t *testing.T) { // Change state from ACTIVE to READONLY err = l1.ChangeState(context.Background(), READONLY) require.NoError(t, err) - test.Poll(t, 1000*time.Millisecond, true, func() interface{} { + test.Poll(t, 1000*time.Millisecond, true, func() any { d, err := r.KVClient.Get(context.Background(), ringKey) require.NoError(t, err) @@ -1057,7 +1057,7 @@ func TestTokenFileOnDisk_WithoutAutoJoinOnStartup(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), l2) //nolint:errcheck // Check this ingester should not in the ring before calling Join - test.Poll(t, 1000*time.Millisecond, true, func() interface{} { + test.Poll(t, 1000*time.Millisecond, true, func() any { d, err := r.KVClient.Get(context.Background(), ringKey) require.NoError(t, err) desc, ok := d.(*Desc) @@ -1073,7 +1073,7 @@ func TestTokenFileOnDisk_WithoutAutoJoinOnStartup(t *testing.T) { // Check this ingester joined, is in readonly state, and has 512 token. var actTokens []uint32 - test.Poll(t, 1000*time.Millisecond, true, func() interface{} { + test.Poll(t, 1000*time.Millisecond, true, func() any { d, err := r.KVClient.Get(context.Background(), ringKey) require.NoError(t, err) desc, ok := d.(*Desc) @@ -1089,7 +1089,7 @@ func TestTokenFileOnDisk_WithoutAutoJoinOnStartup(t *testing.T) { // Check for same tokens. 
slices.Sort(expTokens) slices.Sort(actTokens) - for i := 0; i < 512; i++ { + for range 512 { require.Equal(t, expTokens, actTokens) } } @@ -1113,7 +1113,7 @@ func TestJoinInLeavingState(t *testing.T) { cfg.MinReadyDuration = 1 * time.Nanosecond // Set state as LEAVING - err = r.KVClient.CAS(context.Background(), ringKey, func(in interface{}) (interface{}, bool, error) { + err = r.KVClient.CAS(context.Background(), ringKey, func(in any) (any, bool, error) { r := &Desc{ Ingesters: map[string]InstanceDesc{ "ing1": { @@ -1135,7 +1135,7 @@ func TestJoinInLeavingState(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), l1)) // Check that the lifecycler was able to join after coming up in LEAVING - test.Poll(t, 1000*time.Millisecond, true, func() interface{} { + test.Poll(t, 1000*time.Millisecond, true, func() any { d, err := r.KVClient.Get(context.Background(), ringKey) require.NoError(t, err) @@ -1166,7 +1166,7 @@ func TestJoinInLeavingStateAndLessTokens(t *testing.T) { cfg.MinReadyDuration = 1 * time.Nanosecond // Set state as LEAVING and 1 less token because of conflict resolution - err = r.KVClient.CAS(context.Background(), ringKey, func(in interface{}) (interface{}, bool, error) { + err = r.KVClient.CAS(context.Background(), ringKey, func(in any) (any, bool, error) { r := &Desc{ Ingesters: map[string]InstanceDesc{ "ing1": { @@ -1188,7 +1188,7 @@ func TestJoinInLeavingStateAndLessTokens(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), l1)) // Check that the lifecycler was able to join after coming up in LEAVING - test.Poll(t, 1000*time.Millisecond, true, func() interface{} { + test.Poll(t, 1000*time.Millisecond, true, func() any { d, err := r.KVClient.Get(context.Background(), ringKey) require.NoError(t, err) @@ -1222,7 +1222,7 @@ func TestJoinInJoiningState(t *testing.T) { instance2RegisteredAt := time.Now().Add(-2 * time.Hour) // Set state as JOINING - err = r.KVClient.CAS(context.Background(), ringKey, func(in interface{}) (interface{}, bool, error) { + err = r.KVClient.CAS(context.Background(), ringKey, func(in any) (any, bool, error) { r := &Desc{ Ingesters: map[string]InstanceDesc{ "ing1": { @@ -1246,7 +1246,7 @@ func TestJoinInJoiningState(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), l1)) // Check that the lifecycler was able to join after coming up in JOINING - test.Poll(t, 1000*time.Millisecond, true, func() interface{} { + test.Poll(t, 1000*time.Millisecond, true, func() any { d, err := r.KVClient.Get(context.Background(), ringKey) require.NoError(t, err) @@ -1282,7 +1282,7 @@ func TestRestoreOfZoneWhenOverwritten(t *testing.T) { cfg := testLifecyclerConfig(ringConfig, "ing1") // Set ing1 to not have a zone - err = r.KVClient.CAS(context.Background(), ringKey, func(in interface{}) (interface{}, bool, error) { + err = r.KVClient.CAS(context.Background(), ringKey, func(in any) (any, bool, error) { r := &Desc{ Ingesters: map[string]InstanceDesc{ "ing1": { @@ -1305,7 +1305,7 @@ func TestRestoreOfZoneWhenOverwritten(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), l1)) // Check that the lifecycler was able to reset the zone value to the expected setting - test.Poll(t, 1000*time.Millisecond, true, func() interface{} { + test.Poll(t, 1000*time.Millisecond, true, func() any { d, err := r.KVClient.Get(context.Background(), ringKey) require.NoError(t, err) desc, ok := d.(*Desc) @@ -1318,7 +1318,7 @@ func TestRestoreOfZoneWhenOverwritten(t 
*testing.T) { } func waitRingInstance(t *testing.T, timeout time.Duration, l *Lifecycler, check func(instance InstanceDesc) error) { - test.Poll(t, timeout, nil, func() interface{} { + test.Poll(t, timeout, nil, func() any { desc, err := l.KVStore.Get(context.Background(), l.RingKey) if err != nil { return err diff --git a/pkg/ring/model.go b/pkg/ring/model.go index 70b767740ef..82d0f9ccb3f 100644 --- a/pkg/ring/model.go +++ b/pkg/ring/model.go @@ -3,6 +3,7 @@ package ring import ( "container/heap" "fmt" + "maps" "sort" "sync" "time" @@ -355,7 +356,7 @@ func tokensEqual(lhs, rhs []uint32) bool { if len(lhs) != len(rhs) { return false } - for i := 0; i < len(lhs); i++ { + for i := range lhs { if lhs[i] != rhs[i] { return false } @@ -363,7 +364,7 @@ func tokensEqual(lhs, rhs []uint32) bool { return true } -var tokenMapPool = sync.Pool{New: func() interface{} { return make(map[uint32]struct{}) }} +var tokenMapPool = sync.Pool{New: func() any { return make(map[uint32]struct{}) }} func conflictingTokensExist(normalizedIngesters map[string]InstanceDesc) bool { tokensMap := tokenMapPool.Get().(map[uint32]struct{}) @@ -472,7 +473,7 @@ func (d *Desc) RemoveTombstones(limit time.Time) (total, removed int) { } // Clone returns a deep copy of the ring state. -func (d *Desc) Clone() interface{} { +func (d *Desc) Clone() any { return proto.Clone(d).(*Desc) } @@ -626,7 +627,7 @@ func (d *Desc) RingCompare(o *Desc) CompareResult { return Equal } -func GetOrCreateRingDesc(d interface{}) *Desc { +func GetOrCreateRingDesc(d any) *Desc { if d == nil { return NewDesc() } @@ -649,11 +650,11 @@ func (h TokensHeap) Less(i, j int) bool { return h[i][0] < h[j][0] } -func (h *TokensHeap) Push(x interface{}) { +func (h *TokensHeap) Push(x any) { *h = append(*h, x.([]uint32)) } -func (h *TokensHeap) Pop() interface{} { +func (h *TokensHeap) Pop() any { old := *h n := len(old) x := old[n-1] @@ -709,8 +710,8 @@ func MergeTokensByZone(zones map[string][][]uint32) map[string][]uint32 { return out } -func (d *Desc) SplitByID() map[string]interface{} { - out := make(map[string]interface{}, len(d.Ingesters)) +func (d *Desc) SplitByID() map[string]any { + out := make(map[string]any, len(d.Ingesters)) for key := range d.Ingesters { in := d.Ingesters[key] out[key] = &in @@ -718,7 +719,7 @@ func (d *Desc) SplitByID() map[string]interface{} { return out } -func (d *Desc) JoinIds(in map[string]interface{}) { +func (d *Desc) JoinIds(in map[string]any) { for key, value := range in { d.Ingesters[key] = *(value.(*InstanceDesc)) } @@ -728,7 +729,7 @@ func (d *Desc) GetItemFactory() proto.Message { return &InstanceDesc{} } -func (d *Desc) FindDifference(o codec.MultiKey) (interface{}, []string, error) { +func (d *Desc) FindDifference(o codec.MultiKey) (any, []string, error) { out, ok := o.(*Desc) if !ok { // This method only deals with non-nil rings. 
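TokensHeap's Push and Pop above keep any in their signatures because container/heap's heap.Interface predates generics and is declared in terms of the empty interface (Push(x any), Pop() any); only the spelling changes. A minimal illustrative implementation of that interface (intHeap is not from the patch):

    package main

    import (
        "container/heap"
        "fmt"
    )

    // intHeap is an illustrative min-heap; heap.Interface requires any in Push/Pop.
    type intHeap []int

    func (h intHeap) Len() int           { return len(h) }
    func (h intHeap) Less(i, j int) bool { return h[i] < h[j] }
    func (h intHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
    func (h *intHeap) Push(x any)        { *h = append(*h, x.(int)) }
    func (h *intHeap) Pop() any {
        old := *h
        n := len(old)
        x := old[n-1]
        *h = old[:n-1]
        return x
    }

    func main() {
        h := &intHeap{3, 1, 2}
        heap.Init(h)
        fmt.Println(heap.Pop(h)) // 1
    }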
@@ -754,9 +755,7 @@ func (d *Desc) FindDifference(o codec.MultiKey) (interface{}, []string, error) { //If existent data is empty if d == nil { - for key, value := range out.Ingesters { - toUpdated.Ingesters[key] = value - } + maps.Copy(toUpdated.Ingesters, out.Ingesters) return toUpdated, toDelete, nil } diff --git a/pkg/ring/model_test.go b/pkg/ring/model_test.go index f34b6e566d2..16295ff3541 100644 --- a/pkg/ring/model_test.go +++ b/pkg/ring/model_test.go @@ -48,7 +48,6 @@ func TestInstanceDesc_IsHealthy_ForIngesterOperations(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { actual := testData.ingester.IsHealthy(Write, testData.timeout, time.Now()) @@ -560,22 +559,22 @@ func TestMergeTokensByZone(t *testing.T) { func TestDesc_SplitById_JoinIds(t *testing.T) { tests := map[string]struct { ring *Desc - split map[string]interface{} + split map[string]any }{ "empty ring": { ring: &Desc{Ingesters: map[string]InstanceDesc{}}, - split: map[string]interface{}{}, + split: map[string]any{}, }, "single instance": { ring: &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1", Tokens: []uint32{1, 2, 3}, Timestamp: 123456, State: JOINING, Zone: "zone1", RegisteredTimestamp: 123}}}, - split: map[string]interface{}{"ing1": &InstanceDesc{Addr: "addr1", Tokens: []uint32{1, 2, 3}, Timestamp: 123456, State: JOINING, Zone: "zone1", RegisteredTimestamp: 123}}, + split: map[string]any{"ing1": &InstanceDesc{Addr: "addr1", Tokens: []uint32{1, 2, 3}, Timestamp: 123456, State: JOINING, Zone: "zone1", RegisteredTimestamp: 123}}, }, "two instances": { ring: &Desc{Ingesters: map[string]InstanceDesc{ "ing1": {Addr: "addr1", Tokens: []uint32{1, 2, 3}, Timestamp: 123456, State: JOINING, Zone: "zone1", RegisteredTimestamp: 123}, "ing2": {Addr: "addr2", Tokens: []uint32{3, 4, 5}, Timestamp: 5678, State: ACTIVE, Zone: "zone2", RegisteredTimestamp: 567}, }}, - split: map[string]interface{}{ + split: map[string]any{ "ing1": &InstanceDesc{Addr: "addr1", Tokens: []uint32{1, 2, 3}, Timestamp: 123456, State: JOINING, Zone: "zone1", RegisteredTimestamp: 123}, "ing2": &InstanceDesc{Addr: "addr2", Tokens: []uint32{3, 4, 5}, Timestamp: 5678, State: ACTIVE, Zone: "zone2", RegisteredTimestamp: 567}, }, @@ -612,8 +611,8 @@ func TestDesc_FindDifference(t *testing.T) { tests := map[string]struct { r1 *Desc r2 *Desc - toUpdate interface{} - toDelete interface{} + toUpdate any + toDelete any }{ "nil rings": { r1: nil, diff --git a/pkg/ring/replication_set.go b/pkg/ring/replication_set.go index 31e4dc016fa..c534d919bb2 100644 --- a/pkg/ring/replication_set.go +++ b/pkg/ring/replication_set.go @@ -27,9 +27,9 @@ type ReplicationSet struct { // Do function f in parallel for all replicas in the set, erroring is we exceed // MaxErrors and returning early otherwise. zoneResultsQuorum allows only include // results from zones that already reach quorum to improve performance. 
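The FindDifference hunk above (and distributedCounter.Clone earlier) replace manual key-by-key map copies with maps.Copy, in the standard library since Go 1.21; it copies src's pairs into dst, overwriting keys that already exist. A standalone sketch with illustrative data:

    package main

    import (
        "fmt"
        "maps"
    )

    func main() {
        src := map[string]int{"ing1": 1, "ing2": 2}
        dst := make(map[string]int, len(src))
        // Replaces: for k, v := range src { dst[k] = v }
        maps.Copy(dst, src)
        fmt.Println(len(dst)) // 2
    }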
-func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, zoneResultsQuorum bool, partialDataEnabled bool, f func(context.Context, *InstanceDesc) (interface{}, error)) ([]interface{}, error) { +func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, zoneResultsQuorum bool, partialDataEnabled bool, f func(context.Context, *InstanceDesc) (any, error)) ([]any, error) { type instanceResult struct { - res interface{} + res any err error instance *InstanceDesc } @@ -180,7 +180,7 @@ func hasReplicationSetChangedExcluding(before, after ReplicationSet, exclude fun sort.Sort(ByAddr(beforeInstances)) sort.Sort(ByAddr(afterInstances)) - for i := 0; i < len(beforeInstances); i++ { + for i := range beforeInstances { b := beforeInstances[i] a := afterInstances[i] diff --git a/pkg/ring/replication_set_test.go b/pkg/ring/replication_set_test.go index 401ec7d4094..90dbd2b9fd9 100644 --- a/pkg/ring/replication_set_test.go +++ b/pkg/ring/replication_set_test.go @@ -3,6 +3,7 @@ package ring import ( "context" "errors" + "slices" "testing" "time" @@ -90,9 +91,9 @@ var ( ) // Return a function that fails starting from failAfter times -func failingFunctionAfter(failAfter int32, delay time.Duration) func(context.Context, *InstanceDesc) (interface{}, error) { +func failingFunctionAfter(failAfter int32, delay time.Duration) func(context.Context, *InstanceDesc) (any, error) { count := atomic.NewInt32(0) - return func(context.Context, *InstanceDesc) (interface{}, error) { + return func(context.Context, *InstanceDesc) (any, error) { time.Sleep(delay) if count.Inc() > failAfter { return nil, errFailure @@ -101,12 +102,10 @@ func failingFunctionAfter(failAfter int32, delay time.Duration) func(context.Con } } -func failingFunctionOnZones(zones ...string) func(context.Context, *InstanceDesc) (interface{}, error) { - return func(ctx context.Context, ing *InstanceDesc) (interface{}, error) { - for _, zone := range zones { - if ing.Zone == zone { - return nil, errZoneFailure - } +func failingFunctionOnZones(zones ...string) func(context.Context, *InstanceDesc) (any, error) { + return func(ctx context.Context, ing *InstanceDesc) (any, error) { + if slices.Contains(zones, ing.Zone) { + return nil, errZoneFailure } return 1, nil } @@ -118,10 +117,10 @@ func TestReplicationSet_Do(t *testing.T) { instances []InstanceDesc maxErrors int maxUnavailableZones int - f func(context.Context, *InstanceDesc) (interface{}, error) + f func(context.Context, *InstanceDesc) (any, error) delay time.Duration cancelContextDelay time.Duration - want []interface{} + want []any expectedError error zoneResultsQuorum bool queryPartialData bool @@ -132,15 +131,15 @@ func TestReplicationSet_Do(t *testing.T) { instances: []InstanceDesc{ {}, }, - f: func(c context.Context, id *InstanceDesc) (interface{}, error) { + f: func(c context.Context, id *InstanceDesc) (any, error) { return 1, nil }, - want: []interface{}{1}, + want: []any{1}, }, { name: "max errors = 0, should fail on 1 error out of 1 instance", instances: []InstanceDesc{{}}, - f: func(c context.Context, id *InstanceDesc) (interface{}, error) { + f: func(c context.Context, id *InstanceDesc) (any, error) { return nil, errFailure }, want: nil, @@ -166,7 +165,7 @@ func TestReplicationSet_Do(t *testing.T) { name: "max errors = 1, should handle context canceled", instances: []InstanceDesc{{}, {}, {}}, maxErrors: 1, - f: func(c context.Context, id *InstanceDesc) (interface{}, error) { + f: func(c context.Context, id *InstanceDesc) (any, error) { time.Sleep(300 * time.Millisecond) 
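// Editor's aside (illustrative only, not part of the patch): several hunks in this section
// replace hand-rolled membership loops with slices.Contains and manual map-copy loops with
// maps.Copy; both helpers joined the standard library in Go 1.21. A minimal sketch of the two
// calls, using hypothetical data:
package main

import (
	"fmt"
	"maps"
	"slices"
)

func main() {
	zones := []string{"zone1", "zone5"}
	// slices.Contains replaces: for _, z := range zones { if z == target { return true } }
	fmt.Println(slices.Contains(zones, "zone1")) // true
	fmt.Println(slices.Contains(zones, "zone3")) // false

	// maps.Copy replaces: for k, v := range src { dst[k] = v }
	dst := map[string]int{"a": 1}
	maps.Copy(dst, map[string]int{"b": 2})
	fmt.Println(dst) // map[a:1 b:2]
}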
return 1, nil }, @@ -177,17 +176,17 @@ func TestReplicationSet_Do(t *testing.T) { { name: "max errors = 0, should succeed on all successful instances", instances: []InstanceDesc{{Zone: "zone1"}, {Zone: "zone2"}, {Zone: "zone3"}}, - f: func(c context.Context, id *InstanceDesc) (interface{}, error) { + f: func(c context.Context, id *InstanceDesc) (any, error) { return 1, nil }, - want: []interface{}{1, 1, 1}, + want: []any{1, 1, 1}, }, { name: "max unavailable zones = 1, should succeed on instances failing in 1 out of 3 zones (3 instances)", instances: []InstanceDesc{{Zone: "zone1"}, {Zone: "zone2"}, {Zone: "zone3"}}, f: failingFunctionOnZones("zone1"), maxUnavailableZones: 1, - want: []interface{}{1, 1}, + want: []any{1, 1}, }, { name: "max unavailable zones = 1, should fail on instances failing in 2 out of 3 zones (3 instances)", @@ -199,7 +198,7 @@ func TestReplicationSet_Do(t *testing.T) { { name: "with partial data enabled and max unavailable zones = 1, should succeed on instances failing in 2 out of 3 zones (6 instances)", instances: []InstanceDesc{{Addr: "10.0.0.1", Zone: "zone1"}, {Addr: "10.0.0.2", Zone: "zone2"}, {Addr: "10.0.0.3", Zone: "zone3"}, {Addr: "10.0.0.4", Zone: "zone1"}, {Addr: "10.0.0.5", Zone: "zone2"}, {Addr: "10.0.0.6", Zone: "zone3"}}, - f: func(ctx context.Context, ing *InstanceDesc) (interface{}, error) { + f: func(ctx context.Context, ing *InstanceDesc) (any, error) { if ing.Addr == "10.0.0.1" || ing.Addr == "10.0.0.2" { return nil, errZoneFailure } @@ -207,7 +206,7 @@ func TestReplicationSet_Do(t *testing.T) { }, maxUnavailableZones: 1, queryPartialData: true, - want: []interface{}{1, 1, 1, 1}, + want: []any{1, 1, 1, 1}, expectedError: partialdata.ErrPartialData, errStrContains: []string{"10.0.0.1", "10.0.0.2", "zone failed"}, }, @@ -222,7 +221,7 @@ func TestReplicationSet_Do(t *testing.T) { { name: "with partial data enabled, should fail on instances returning 422", instances: []InstanceDesc{{Addr: "1", Zone: "zone1"}, {Addr: "2", Zone: "zone2"}, {Addr: "3", Zone: "zone3"}, {Addr: "4", Zone: "zone1"}, {Addr: "5", Zone: "zone2"}, {Addr: "6", Zone: "zone3"}}, - f: func(ctx context.Context, ing *InstanceDesc) (interface{}, error) { + f: func(ctx context.Context, ing *InstanceDesc) (any, error) { if ing.Addr == "1" || ing.Addr == "2" { return nil, validation.LimitError("limit breached") } @@ -237,7 +236,7 @@ func TestReplicationSet_Do(t *testing.T) { instances: []InstanceDesc{{Zone: "zone1"}, {Zone: "zone1"}, {Zone: "zone2"}, {Zone: "zone2"}, {Zone: "zone3"}, {Zone: "zone3"}}, f: failingFunctionOnZones("zone1"), maxUnavailableZones: 1, - want: []interface{}{1, 1, 1, 1}, + want: []any{1, 1, 1, 1}, }, { name: "max unavailable zones = 2, should fail on instances failing in 3 out of 5 zones (5 instances)", @@ -251,16 +250,16 @@ func TestReplicationSet_Do(t *testing.T) { instances: []InstanceDesc{{Zone: "zone1"}, {Zone: "zone1"}, {Zone: "zone2"}, {Zone: "zone2"}, {Zone: "zone3"}, {Zone: "zone3"}, {Zone: "zone4"}, {Zone: "zone4"}, {Zone: "zone5"}, {Zone: "zone5"}}, f: failingFunctionOnZones("zone1", "zone5"), maxUnavailableZones: 2, - want: []interface{}{1, 1, 1, 1, 1, 1}, + want: []any{1, 1, 1, 1, 1, 1}, }, { name: "max unavailable zones = 1, zoneResultsQuorum = true, should contain 4 results (2 from zone1, 2 from zone2)", instances: []InstanceDesc{{Zone: "zone1"}, {Zone: "zone2"}, {Zone: "zone3"}, {Zone: "zone1"}, {Zone: "zone2"}, {Zone: "zone3"}}, - f: func(c context.Context, id *InstanceDesc) (interface{}, error) { + f: func(c context.Context, id *InstanceDesc) (any, 
error) { return 1, nil }, maxUnavailableZones: 1, - want: []interface{}{1, 1, 1, 1}, + want: []any{1, 1, 1, 1}, zoneResultsQuorum: true, }, } diff --git a/pkg/ring/replication_set_tracker.go b/pkg/ring/replication_set_tracker.go index bc7401240d7..0ea465cfdab 100644 --- a/pkg/ring/replication_set_tracker.go +++ b/pkg/ring/replication_set_tracker.go @@ -8,7 +8,7 @@ type replicationSetResultTracker interface { // Signals an instance has done the execution, either successful (no error) // or failed (with error). If successful, result will be recorded and can // be accessed via getResults. - done(instance *InstanceDesc, result interface{}, err error) + done(instance *InstanceDesc, result any, err error) // Returns true if all instances are done executing finished() bool @@ -23,7 +23,7 @@ type replicationSetResultTracker interface { failedCompletely() bool // Returns recorded results. - getResults() []interface{} + getResults() []any // Returns errors getErrors() []error @@ -34,7 +34,7 @@ type defaultResultTracker struct { numSucceeded int numErrors int maxErrors int - results []interface{} + results []any numInstances int errors []error } @@ -46,12 +46,12 @@ func newDefaultResultTracker(instances []InstanceDesc, maxErrors int) *defaultRe numErrors: 0, maxErrors: maxErrors, errors: make([]error, 0, len(instances)), - results: make([]interface{}, 0, len(instances)), + results: make([]any, 0, len(instances)), numInstances: len(instances), } } -func (t *defaultResultTracker) done(instance *InstanceDesc, result interface{}, err error) { +func (t *defaultResultTracker) done(instance *InstanceDesc, result any, err error) { if err == nil { t.numSucceeded++ t.results = append(t.results, result) @@ -77,7 +77,7 @@ func (t *defaultResultTracker) failedCompletely() bool { return t.numInstances == t.numErrors } -func (t *defaultResultTracker) getResults() []interface{} { +func (t *defaultResultTracker) getResults() []any { return t.results } @@ -92,7 +92,7 @@ type zoneAwareResultTracker struct { failuresByZone map[string]int minSuccessfulZones int maxUnavailableZones int - resultsPerZone map[string][]interface{} + resultsPerZone map[string][]any numInstances int zoneResultsQuorum bool zoneCount int @@ -114,13 +114,13 @@ func newZoneAwareResultTracker(instances []InstanceDesc, maxUnavailableZones int t.waitingByZone[instance.Zone]++ } t.minSuccessfulZones = len(t.waitingByZone) - maxUnavailableZones - t.resultsPerZone = make(map[string][]interface{}, len(t.waitingByZone)) + t.resultsPerZone = make(map[string][]any, len(t.waitingByZone)) t.zoneCount = len(t.waitingByZone) return t } -func (t *zoneAwareResultTracker) done(instance *InstanceDesc, result interface{}, err error) { +func (t *zoneAwareResultTracker) done(instance *InstanceDesc, result any, err error) { if err != nil { t.failuresByZone[instance.Zone]++ t.errors = append(t.errors, fmt.Errorf("(%s, %s) %w", instance.GetAddr(), instance.GetZone(), err)) @@ -128,7 +128,7 @@ func (t *zoneAwareResultTracker) done(instance *InstanceDesc, result interface{} if _, ok := t.resultsPerZone[instance.Zone]; !ok { // If it is the first result in the zone, then total number of instances // in this zone should be number of waiting required. 
- t.resultsPerZone[instance.Zone] = make([]interface{}, 0, t.waitingByZone[instance.Zone]) + t.resultsPerZone[instance.Zone] = make([]any, 0, t.waitingByZone[instance.Zone]) } t.resultsPerZone[instance.Zone] = append(t.resultsPerZone[instance.Zone], result) } @@ -167,8 +167,8 @@ func (t *zoneAwareResultTracker) failedCompletely() bool { return allZonesFailed || (t.failed() && atLeastHalfOfFleetFailed) } -func (t *zoneAwareResultTracker) getResults() []interface{} { - results := make([]interface{}, 0, t.numInstances) +func (t *zoneAwareResultTracker) getResults() []any { + results := make([]any, 0, t.numInstances) if t.zoneResultsQuorum { for zone, waiting := range t.waitingByZone { // No need to check failuresByZone since tracker diff --git a/pkg/ring/replication_set_tracker_test.go b/pkg/ring/replication_set_tracker_test.go index e5ee5c9de16..b0e062e3083 100644 --- a/pkg/ring/replication_set_tracker_test.go +++ b/pkg/ring/replication_set_tracker_test.go @@ -130,7 +130,7 @@ func TestDefaultResultTracker(t *testing.T) { assert.True(t, tracker.succeeded()) assert.False(t, tracker.failed()) - assert.Equal(t, []interface{}{1, 2, 3}, tracker.getResults()) + assert.Equal(t, []any{1, 2, 3}, tracker.getResults()) }, }, "record and getResults2": { @@ -152,7 +152,7 @@ func TestDefaultResultTracker(t *testing.T) { assert.True(t, tracker.succeeded()) assert.False(t, tracker.failed()) - assert.Equal(t, []interface{}{[]int{1, 1, 1}, []int{2, 2, 2}, []int{3, 3, 3}}, tracker.getResults()) + assert.Equal(t, []any{[]int{1, 1, 1}, []int{2, 2, 2}, []int{3, 3, 3}}, tracker.getResults()) }, }, "failedCompletely() should return true only if all instances have failed, regardless of max errors": { @@ -249,7 +249,7 @@ func TestZoneAwareResultTracker(t *testing.T) { assert.True(t, tracker.succeeded()) assert.False(t, tracker.failed()) - assert.Equal(t, []interface{}{1, 1, 1}, tracker.getResults()) + assert.Equal(t, []any{1, 1, 1}, tracker.getResults()) }, }, "should succeed once all 6 instances succeed on max unavailable zones = 0": { @@ -283,7 +283,7 @@ func TestZoneAwareResultTracker(t *testing.T) { assert.True(t, tracker.succeeded()) assert.False(t, tracker.failed()) - assert.Equal(t, []interface{}{1, 1, 1, 1, 1, 1}, tracker.getResults()) + assert.Equal(t, []any{1, 1, 1, 1, 1, 1}, tracker.getResults()) }, }, "should succeed once all 5 instances succeed on max unavailable zones = 1, zone results quorum disabled": { @@ -314,7 +314,7 @@ func TestZoneAwareResultTracker(t *testing.T) { assert.True(t, tracker.succeeded()) assert.False(t, tracker.failed()) - assert.Equal(t, []interface{}{1, 1, 1, 1, 1}, tracker.getResults()) + assert.Equal(t, []any{1, 1, 1, 1, 1}, tracker.getResults()) }, }, "should succeed once all 5 instances succeed on max unavailable zones = 1, zone results quorum enabled": { @@ -345,7 +345,7 @@ func TestZoneAwareResultTracker(t *testing.T) { assert.True(t, tracker.succeeded()) assert.False(t, tracker.failed()) - assert.Equal(t, []interface{}{1, 1, 1, 1}, tracker.getResults()) + assert.Equal(t, []any{1, 1, 1, 1}, tracker.getResults()) }, }, "should fail on 1st failing instance on max unavailable zones = 0": { diff --git a/pkg/ring/ring.go b/pkg/ring/ring.go index 92c343d6849..6b121adea0f 100644 --- a/pkg/ring/ring.go +++ b/pkg/ring/ring.go @@ -8,6 +8,7 @@ import ( "fmt" "math" "math/rand" + "slices" "sync" "time" @@ -19,7 +20,6 @@ import ( "github.com/cortexproject/cortex/pkg/ring/kv" shardUtil "github.com/cortexproject/cortex/pkg/ring/shard" - "github.com/cortexproject/cortex/pkg/util" 
"github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -307,7 +307,7 @@ func (r *Ring) loop(ctx context.Context) error { r.updateRingMetrics(Different) r.mtx.Unlock() - r.KVClient.WatchKey(ctx, r.key, func(value interface{}) bool { + r.KVClient.WatchKey(ctx, r.key, func(value any) bool { if value == nil { level.Info(r.logger).Log("msg", "ring doesn't exist in KV store yet") return true @@ -327,7 +327,7 @@ func (r *Ring) updateRingState(ringDesc *Desc) { // Filter out all instances belonging to excluded zones. if len(r.cfg.ExcludedZones) > 0 { for instanceID, instance := range ringDesc.Ingesters { - if util.StringsContain(r.cfg.ExcludedZones, instance.Zone) { + if slices.Contains(r.cfg.ExcludedZones, instance.Zone) { delete(ringDesc.Ingesters, instanceID) } } @@ -411,7 +411,7 @@ func (r *Ring) Get(key uint32, op Operation, bufDescs []InstanceDesc, bufHosts [ } // We want n *distinct* instances. - if util.StringsContain(distinctHosts, info.InstanceID) { + if slices.Contains(distinctHosts, info.InstanceID) { continue } @@ -589,10 +589,7 @@ func (r *Ring) GetReplicationSetForOperation(op Operation) (ReplicationSet, erro } else { // Calculate the number of required instances; // ensure we always require at least RF-1 when RF=3. - numRequired := len(r.ringDesc.Ingesters) - if numRequired < r.cfg.ReplicationFactor { - numRequired = r.cfg.ReplicationFactor - } + numRequired := max(len(r.ringDesc.Ingesters), r.cfg.ReplicationFactor) // We can tolerate this many failures numRequired -= r.cfg.ReplicationFactor / 2 diff --git a/pkg/ring/ring_test.go b/pkg/ring/ring_test.go index 682cb7d942d..55d5ff10582 100644 --- a/pkg/ring/ring_test.go +++ b/pkg/ring/ring_test.go @@ -6,6 +6,7 @@ import ( "fmt" "math" "math/rand" + "slices" "sort" "strconv" "strings" @@ -47,7 +48,7 @@ func benchmarkBatch(b *testing.B, g TokenGenerator, numInstances, numKeys int) { // Make a random ring with N instances, and M tokens per ingests desc := NewDesc() ring := &Desc{} - for i := 0; i < numInstances; i++ { + for i := range numInstances { tokens := g.GenerateTokens(ring, strconv.Itoa(i), "zone", numTokens, true) desc.AddIngester(fmt.Sprintf("%d", i), fmt.Sprintf("instance-%d", i), strconv.Itoa(i), tokens, ACTIVE, time.Now()) } @@ -88,9 +89,8 @@ func benchmarkBatch(b *testing.B, g TokenGenerator, numInstances, numKeys int) { for n, c := range tc { b.Run(n, func(b *testing.B) { // Generate a batch of N random keys, and look them up - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { generateKeys(rnd, numKeys, keys) err := DoBatch(ctx, Write, &r, c.exe, keys, callback, cleanup) require.NoError(b, err) @@ -100,7 +100,7 @@ func benchmarkBatch(b *testing.B, g TokenGenerator, numInstances, numKeys int) { } func generateKeys(r *rand.Rand, numTokens int, dest []uint32) { - for i := 0; i < numTokens; i++ { + for i := range numTokens { dest[i] = r.Uint32() } } @@ -136,7 +136,7 @@ func benchmarkUpdateRingState(b *testing.B, g TokenGenerator, numInstances, numZ // Also make a copy with different timestamps and one with different tokens desc := NewDesc() otherDesc := NewDesc() - for i := 0; i < numInstances; i++ { + for i := range numInstances { id := fmt.Sprintf("%d", i) tokens := g.GenerateTokens(desc, id, "zone", numTokens, true) now := time.Now() @@ -156,8 +156,8 @@ func benchmarkUpdateRingState(b *testing.B, g TokenGenerator, numInstances, numZ } flipFlop := true - b.ResetTimer() - for n := 0; n < b.N; n++ { + + for b.Loop() { if flipFlop { 
ring.updateRingState(desc) } else { @@ -285,7 +285,7 @@ func TestRing_Get_ZoneAwarenessWithIngesterLeaving(t *testing.T) { // Use the GenerateTokens to get an array of random uint32 values. testValues := g.GenerateTokens(r, "", "", testCount, true) - for i := 0; i < testCount; i++ { + for i := range testCount { set, err := ring.Get(testValues[i], Write, instancesList, bufHosts, bufZones) require.NoError(t, err) @@ -362,7 +362,7 @@ func TestRing_Get_ZoneAwarenessWithIngesterJoining(t *testing.T) { // Use the GenerateTokens to get an array of random uint32 values. testValues := g.GenerateTokens(ring.ringDesc, "", "", testCount, true) - for i := 0; i < testCount; i++ { + for i := range testCount { set, err := ring.Get(testValues[i], Write, instancesList, bufHosts, bufZones) require.NoError(t, err) @@ -467,7 +467,7 @@ func TestRing_Get_ZoneAwareness(t *testing.T) { var set ReplicationSet var err error - for i := 0; i < testCount; i++ { + for i := range testCount { set, err = ring.Get(testValues[i], Write, instances, bufHosts, bufZones) if testData.expectedErr != "" { require.EqualError(t, err, testData.expectedErr) @@ -557,12 +557,12 @@ func TestRing_Get_Stability(t *testing.T) { KVClient: &MockClient{}, } - for i := 0; i < numOfTokensToTest; i++ { + for i := range numOfTokensToTest { expectedSet, err := ring.Get(testValues[i], Write, bufDescs, bufHosts, bufZones) assert.NoError(t, err) assert.Equal(t, testData.replicationFactor, len(expectedSet.Instances)) - for j := 0; j < numOfInvocations; j++ { + for range numOfInvocations { newSet, err := ring.Get(testValues[i], Write, bufDescs, bufHosts, bufZones) assert.NoError(t, err) assert.Equal(t, expectedSet, newSet) @@ -687,7 +687,7 @@ func TestRing_Get_Consistency(t *testing.T) { ringDesc := &Desc{Ingesters: generateRingInstances(testData.initialInstances, testData.numZones, 128)} testValues := g.GenerateTokens(ringDesc, "", "", 128, true) bufDescs, bufHosts, bufZones := MakeBuffersForGet() - for i := 0; i < 128; i++ { + for i := range 128 { ring := Ring{ cfg: Config{ HeartbeatTimeout: time.Hour, @@ -971,9 +971,6 @@ func TestRing_GetAllHealthy(t *testing.T) { t.Run(testName, func(t *testing.T) { // Init the ring. ringDesc := &Desc{Ingesters: testData.ringInstances} - for id, instance := range ringDesc.Ingesters { - ringDesc.Ingesters[id] = instance - } ring := Ring{ cfg: Config{HeartbeatTimeout: heartbeatTimeout}, @@ -1193,9 +1190,6 @@ func TestRing_GetReplicationSetForOperation(t *testing.T) { t.Run(testName, func(t *testing.T) { // Init the ring. ringDesc := &Desc{Ingesters: testData.ringInstances} - for id, instance := range ringDesc.Ingesters { - ringDesc.Ingesters[id] = instance - } ring := Ring{ cfg: Config{ @@ -1782,7 +1776,7 @@ func TestRing_ShuffleShard_Stability(t *testing.T) { require.NoError(t, err) // Assert that multiple invocations generate the same exact shard. - for n := 0; n < numInvocations; n++ { + for range numInvocations { r := ring.ShuffleShard(tenantID, size) actual, err := r.GetAllHealthy(Read) require.NoError(t, err) @@ -1814,7 +1808,7 @@ func TestRing_ShuffleShard_Shuffling(t *testing.T) { // Initialise the ring instances. To have stable tests we generate tokens using a linear // distribution. Tokens within the same zone are evenly distributed too. 
instances := make(map[string]InstanceDesc, numInstances) - for i := 0; i < numInstances; i++ { + for i := range numInstances { id := fmt.Sprintf("instance-%d", i) instances[id] = InstanceDesc{ Addr: fmt.Sprintf("127.0.0.%d", i), @@ -1871,7 +1865,7 @@ func TestRing_ShuffleShard_Shuffling(t *testing.T) { numMatching := 0 for _, c := range currShard { - if util.StringsContain(otherShard, c) { + if slices.Contains(otherShard, c) { numMatching++ } } @@ -1944,7 +1938,7 @@ func TestRing_ShuffleShard_Consistency(t *testing.T) { // Compute the initial shard for each tenant. initial := map[int]ReplicationSet{} - for id := 0; id < numTenants; id++ { + for id := range numTenants { set, err := ring.ShuffleShard(fmt.Sprintf("%d", id), s.shardSize).GetAllHealthy(Read) require.NoError(t, err) initial[id] = set @@ -1971,7 +1965,7 @@ func TestRing_ShuffleShard_Consistency(t *testing.T) { // Compute the update shard for each tenant and compare it with the initial one. // If the "consistency" property is guaranteed, we expect no more then 1 different instance // in the updated shard. - for id := 0; id < numTenants; id++ { + for id := range numTenants { updated, err := ring.ShuffleShard(fmt.Sprintf("%d", id), s.shardSize).GetAllHealthy(Read) require.NoError(t, err) @@ -1986,7 +1980,7 @@ func TestRing_ShuffleShard_Consistency(t *testing.T) { func TestRing_ShuffleShard_ConsistencyOnShardSizeChanged(t *testing.T) { // Create 30 instances in 3 zones. ringInstances := map[string]InstanceDesc{} - for i := 0; i < 30; i++ { + for i := range 30 { name, desc := generateRingInstance(i, i%3, 128) ringInstances[name] = desc } @@ -2067,7 +2061,7 @@ func TestRing_ShuffleShard_ConsistencyOnShardSizeChanged(t *testing.T) { func TestRing_ShuffleShardWithZoneStability_ConsistencyOnShardSizeChanged(t *testing.T) { // Create 300 instances in 3 zones. ringInstances := map[string]InstanceDesc{} - for i := 0; i < 300; i++ { + for i := range 300 { name, desc := generateRingInstance(i, i%3, 128) ringInstances[name] = desc } @@ -2129,7 +2123,7 @@ func TestRing_ShuffleShardWithZoneStability_ConsistencyOnShardSizeChanged(t *tes func TestRing_ShuffleShard_ConsistencyOnZonesChanged(t *testing.T) { // Create 20 instances in 2 zones. ringInstances := map[string]InstanceDesc{} - for i := 0; i < 20; i++ { + for i := range 20 { name, desc := generateRingInstance(i, i%2, 128) ringInstances[name] = desc } @@ -2206,7 +2200,7 @@ func TestRing_ShuffleShard_ConsistencyOnZonesChanged(t *testing.T) { func TestRing_ShuffleShardWithZoneStability_ConsistencyOnZonesChanged(t *testing.T) { // Create 20 instances in 2 zones. ringInstances := map[string]InstanceDesc{} - for i := 0; i < 20; i++ { + for i := range 20 { name, desc := generateRingInstance(i, i%2, 128) ringInstances[name] = desc } @@ -2600,9 +2594,6 @@ func TestRing_ShuffleShardWithReadOnlyIngesters(t *testing.T) { t.Run(testName, func(t *testing.T) { // Init the ring. 
ringDesc := &Desc{Ingesters: testData.ringInstances} - for id, instance := range ringDesc.Ingesters { - ringDesc.Ingesters[id] = instance - } ring := Ring{ cfg: Config{ @@ -2823,9 +2814,7 @@ func benchmarkShuffleSharding(b *testing.B, numInstances, numZones, numTokens, s KVClient: &MockClient{}, } - b.ResetTimer() - - for n := 0; n < b.N; n++ { + for b.Loop() { ring.ShuffleShard("tenant-1", shardSize) } } @@ -2855,9 +2844,7 @@ func BenchmarkRing_Get(b *testing.B) { buf, bufHosts, bufZones := MakeBuffersForGet() r := rand.New(rand.NewSource(time.Now().UnixNano())) - b.ResetTimer() - - for n := 0; n < b.N; n++ { + for b.Loop() { set, err := ring.Get(r.Uint32(), Write, buf, bufHosts, bufZones) if err != nil || len(set.Instances) != replicationFactor { b.Fatal() @@ -3007,7 +2994,7 @@ func TestRingUpdates(t *testing.T) { } // Ensure the ring client got updated. - test.Poll(t, 1*time.Second, testData.expectedInstances, func() interface{} { + test.Poll(t, 1*time.Second, testData.expectedInstances, func() any { return ring.InstancesCount() }) @@ -3024,7 +3011,7 @@ func TestRingUpdates(t *testing.T) { // Ensure there's no instance in an excluded zone. if len(testData.excludedZones) > 0 { - assert.False(t, util.StringsContain(testData.excludedZones, ing.Zone)) + assert.False(t, slices.Contains(testData.excludedZones, ing.Zone)) } } @@ -3034,7 +3021,7 @@ func TestRingUpdates(t *testing.T) { } // Ensure the ring client got updated. - test.Poll(t, 1*time.Second, 0, func() interface{} { + test.Poll(t, 1*time.Second, 0, func() any { return ring.InstancesCount() }) }) @@ -3099,14 +3086,14 @@ func TestShuffleShardWithCaching(t *testing.T) { const zones = 3 lcs := []*Lifecycler(nil) - for i := 0; i < numLifecyclers; i++ { + for i := range numLifecyclers { lc := startLifecycler(t, cfg, 500*time.Millisecond, i, zones) lcs = append(lcs, lc) } // Wait until all instances in the ring are ACTIVE. - test.Poll(t, 5*time.Second, numLifecyclers, func() interface{} { + test.Poll(t, 5*time.Second, numLifecyclers, func() any { active := 0 rs, _ := ring.GetReplicationSetForOperation(Read) for _, ing := range rs.Instances { @@ -3127,7 +3114,7 @@ func TestShuffleShardWithCaching(t *testing.T) { // Do 100 iterations over two seconds. Make sure we get the same subring. const iters = 100 sleep := (2 * time.Second) / iters - for i := 0; i < iters; i++ { + for range iters { newSubring := ring.ShuffleShard(user, shardSize) require.True(t, subring == newSubring, "cached subring reused") require.Equal(t, shardSize, subring.InstancesCount()) @@ -3147,11 +3134,11 @@ func TestShuffleShardWithCaching(t *testing.T) { } // Now stop one lifecycler from each zone. Subring needs to be recomputed. 
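// Editor's aside (illustrative only, not part of the patch): the test hunks around here rely on
// two language changes. Go 1.22 allows ranging over an integer (`for i := range n`, or plain
// `for range n` when the index is unused), which is what the modernize tool substitutes for
// simple counted loops, and it also gives loop variables per-iteration scope, which is why the
// redundant `testData := testData` style copies are deleted. Go 1.21 added the built-in min and
// max functions used for numRequired in ring.go. A minimal sketch:
package main

import "fmt"

func main() {
	// Range over an int: i takes the values 0..3 in order.
	for i := range 4 {
		fmt.Println("iteration", i)
	}
	// Index not needed at all:
	count := 0
	for range 4 {
		count++
	}
	// Built-in max/min (Go 1.21), replacing the if-based clamp:
	numRequired := max(count, 7)
	fmt.Println(count, numRequired, min(count, 7)) // 4 7 4
}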
- for i := 0; i < zones; i++ { + for i := range zones { require.NoError(t, services.StopAndAwaitTerminated(context.Background(), lcs[i])) } - test.Poll(t, 5*time.Second, numLifecyclers-zones, func() interface{} { + test.Poll(t, 5*time.Second, numLifecyclers-zones, func() any { return ring.InstancesCount() }) diff --git a/pkg/ring/token_file_test.go b/pkg/ring/token_file_test.go index a456da5afe4..51b347b7bb7 100644 --- a/pkg/ring/token_file_test.go +++ b/pkg/ring/token_file_test.go @@ -12,7 +12,7 @@ import ( func TestTokenFile_Serialization(t *testing.T) { tokens := make(Tokens, 0, 512) - for i := 0; i < 512; i++ { + for range 512 { tokens = append(tokens, uint32(rand.Int31())) } tokenFile := TokenFile{ @@ -30,7 +30,7 @@ func TestTokenFile_Serialization(t *testing.T) { func TestTokenFile_Serialization_ForwardCompatibility(t *testing.T) { tokens := make(Tokens, 0, 512) - for i := 0; i < 512; i++ { + for range 512 { tokens = append(tokens, uint32(rand.Int31())) } b, err := oldMarshal(tokens) @@ -44,7 +44,7 @@ func TestTokenFile_Serialization_ForwardCompatibility(t *testing.T) { func TestTokenFile_Serialization_BackwardCompatibility(t *testing.T) { tokens := make(Tokens, 0, 512) - for i := 0; i < 512; i++ { + for range 512 { tokens = append(tokens, uint32(rand.Int31())) } tokenFile := TokenFile{ diff --git a/pkg/ring/token_generator.go b/pkg/ring/token_generator.go index 06d54cbfae2..59f3db23a3c 100644 --- a/pkg/ring/token_generator.go +++ b/pkg/ring/token_generator.go @@ -4,6 +4,7 @@ import ( "container/heap" "math" "math/rand" + "slices" "sort" "strings" "time" @@ -59,9 +60,7 @@ func (g *RandomTokenGenerator) GenerateTokens(ring *Desc, _, _ string, numTokens } // Ensure returned tokens are sorted. - sort.Slice(tokens, func(i, j int) bool { - return tokens[i] < tokens[j] - }) + slices.Sort(tokens) return tokens } @@ -235,9 +234,7 @@ func (g *MinimizeSpreadTokenGenerator) GenerateTokens(ring *Desc, id, zone strin } } - sort.Slice(r, func(i, j int) bool { - return r[i] < r[j] - }) + slices.Sort(r) return r } @@ -291,7 +288,7 @@ func tokenDistance(from, to uint32) int64 { } func findFirst(n int, f func(int) bool) int { - for i := 0; i < n; i++ { + for i := range n { if f(i) { return i } diff --git a/pkg/ring/token_generator_test.go b/pkg/ring/token_generator_test.go index 0c482f144d6..a76826eb429 100644 --- a/pkg/ring/token_generator_test.go +++ b/pkg/ring/token_generator_test.go @@ -59,7 +59,7 @@ func TestGenerateTokens_IgnoresOldTokens(t *testing.T) { d := NewDesc() dups := make(map[uint32]bool) - for i := 0; i < 500; i++ { + for i := range 500 { id := strconv.Itoa(i) zone := strconv.Itoa(i % 3) tokens := tc.tg.GenerateTokens(d, id, zone, 500, true) @@ -91,7 +91,7 @@ func TestMinimizeSpreadTokenGenerator(t *testing.T) { require.Equal(t, mTokenGenerator.called, len(zones)) // Should Generate tokens based on the ring state - for i := 0; i < 50; i++ { + for i := range 50 { generateTokensForIngesters(t, rindDesc, fmt.Sprintf("minimize-%v", i), zones, minimizeTokenGenerator, dups) assertDistancePerIngester(t, rindDesc, 0.01) } diff --git a/pkg/ring/tokens.go b/pkg/ring/tokens.go index 28c57d460d3..48accde1b14 100644 --- a/pkg/ring/tokens.go +++ b/pkg/ring/tokens.go @@ -21,7 +21,7 @@ func (t Tokens) Equals(other Tokens) bool { sort.Sort(mine) sort.Sort(other) - for i := 0; i < len(mine); i++ { + for i := range mine { if mine[i] != other[i] { return false } diff --git a/pkg/ruler/api.go b/pkg/ruler/api.go index 695e33954a3..59e3303127e 100644 --- a/pkg/ruler/api.go +++ b/pkg/ruler/api.go @@ -68,7 
+68,7 @@ type RuleGroup struct { Limit int64 `json:"limit"` } -type rule interface{} +type rule any type alertingRule struct { // State can be "pending", "firing", "inactive". @@ -404,7 +404,7 @@ var ( ErrBadRuleGroup = errors.New("unable to decoded rule group") ) -func marshalAndSend(output interface{}, w http.ResponseWriter, logger log.Logger) { +func marshalAndSend(output any, w http.ResponseWriter, logger log.Logger) { d, err := yaml.Marshal(&output) if err != nil { level.Error(logger).Log("msg", "error marshalling yaml rule groups", "err", err) diff --git a/pkg/ruler/api_test.go b/pkg/ruler/api_test.go index e717ede2d08..2485f5b927d 100644 --- a/pkg/ruler/api_test.go +++ b/pkg/ruler/api_test.go @@ -206,31 +206,31 @@ func Test_stripEvaluationFields(t *testing.T) { // stripEvaluationFields sets evaluation-related fields of a rules API response to zero values. func stripEvaluationFields(t *testing.T, r util_api.Response) { - dataMap, ok := r.Data.(map[string]interface{}) + dataMap, ok := r.Data.(map[string]any) if !ok { t.Fatalf("expected map[string]interface{} got %T", r.Data) } - groups, ok := dataMap["groups"].([]interface{}) + groups, ok := dataMap["groups"].([]any) if !ok { t.Fatalf("expected []interface{} got %T", dataMap["groups"]) } for i := range groups { - group, ok := groups[i].(map[string]interface{}) + group, ok := groups[i].(map[string]any) if !ok { t.Fatalf("expected map[string]interface{} got %T", groups[i]) } group["evaluationTime"] = 0 group["lastEvaluation"] = "0001-01-01T00:00:00Z" - rules, ok := group["rules"].([]interface{}) + rules, ok := group["rules"].([]any) if !ok { t.Fatalf("expected []interface{} got %T", group["rules"]) } for i := range rules { - rule, ok := rules[i].(map[string]interface{}) + rule, ok := rules[i].(map[string]any) if !ok { t.Fatalf("expected map[string]interface{} got %T", rules[i]) } diff --git a/pkg/ruler/client_pool_test.go b/pkg/ruler/client_pool_test.go index 66fe273a685..20b5b178aa8 100644 --- a/pkg/ruler/client_pool_test.go +++ b/pkg/ruler/client_pool_test.go @@ -40,7 +40,7 @@ func Test_newRulerClientFactory(t *testing.T) { reg := prometheus.NewPedanticRegistry() factory := newRulerClientFactory(cfg, reg) - for i := 0; i < 2; i++ { + for range 2 { client, err := factory(listener.Addr().String()) require.NoError(t, err) defer client.Close() //nolint:errcheck diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index 0b5c8826990..8357b6db767 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -274,12 +274,12 @@ func RecordAndReportRuleQueryMetrics(qf rules.QueryFunc, userID string, evalMetr queryChunkBytes.Add(float64(queryStats.FetchedChunkBytes)) queryDataBytes.Add(float64(queryStats.FetchedDataBytes)) // Log ruler query stats. 
- logMessage := []interface{}{ + logMessage := []any{ "msg", "query stats", "component", "ruler", } if origin := ctx.Value(promql.QueryOrigin{}); origin != nil { - queryLabels := origin.(map[string]interface{}) + queryLabels := origin.(map[string]any) rgMap := queryLabels["ruleGroup"].(map[string]string) logMessage = append(logMessage, "rule_group", rgMap["name"], diff --git a/pkg/ruler/external_labels_test.go b/pkg/ruler/external_labels_test.go index 1bc13a65831..86cc0bb153d 100644 --- a/pkg/ruler/external_labels_test.go +++ b/pkg/ruler/external_labels_test.go @@ -43,7 +43,6 @@ func TestUserExternalLabels(t *testing.T) { const userID = "test-user" for _, data := range tests { - data := data t.Run(data.name, func(t *testing.T) { if data.removeBeforeTest { e.remove(userID) diff --git a/pkg/ruler/lifecycle_test.go b/pkg/ruler/lifecycle_test.go index 030d71f79d8..bb09f6ef44b 100644 --- a/pkg/ruler/lifecycle_test.go +++ b/pkg/ruler/lifecycle_test.go @@ -36,7 +36,7 @@ func TestRulerShutdown(t *testing.T) { defer services.StopAndAwaitTerminated(ctx, r) //nolint:errcheck // Wait until the tokens are registered in the ring - test.Poll(t, 100*time.Millisecond, config.Ring.NumTokens, func() interface{} { + test.Poll(t, 100*time.Millisecond, config.Ring.NumTokens, func() any { return numTokens(ringStore, "localhost", ringKey) }) @@ -45,7 +45,7 @@ func TestRulerShutdown(t *testing.T) { require.NoError(t, services.StopAndAwaitTerminated(context.Background(), r)) // Wait until the tokens are unregistered from the ring - test.Poll(t, 100*time.Millisecond, 0, func() interface{} { + test.Poll(t, 100*time.Millisecond, 0, func() any { return numTokens(ringStore, "localhost", ringKey) }) } @@ -73,7 +73,7 @@ func TestRuler_RingLifecyclerShouldAutoForgetUnhealthyInstances(t *testing.T) { // Add an unhealthy instance to the ring. tg := ring.NewRandomTokenGenerator() - require.NoError(t, ringStore.CAS(ctx, ringKey, func(in interface{}) (interface{}, bool, error) { + require.NoError(t, ringStore.CAS(ctx, ringKey, func(in any) (any, bool, error) { ringDesc := ring.GetOrCreateRingDesc(in) instance := ringDesc.AddIngester(unhealthyInstanceID, "1.1.1.1", "", tg.GenerateTokens(ringDesc, unhealthyInstanceID, "", config.Ring.NumTokens, true), ring.ACTIVE, time.Now()) @@ -84,7 +84,7 @@ func TestRuler_RingLifecyclerShouldAutoForgetUnhealthyInstances(t *testing.T) { })) // Ensure the unhealthy instance is removed from the ring. - test.Poll(t, time.Second*5, false, func() interface{} { + test.Poll(t, time.Second*5, false, func() any { d, err := ringStore.Get(ctx, ringKey) if err != nil { return err diff --git a/pkg/ruler/manager.go b/pkg/ruler/manager.go index 2f691abe323..8a23b37f333 100644 --- a/pkg/ruler/manager.go +++ b/pkg/ruler/manager.go @@ -270,7 +270,7 @@ func (r *DefaultMultiTenantManager) createRulesManager(user string, ctx context. 
} func defaultRuleGroupIterationFunc(ctx context.Context, g *promRules.Group, evalTimestamp time.Time) { - logMessage := []interface{}{ + logMessage := []any{ "component", "ruler", "rule_group", g.Name(), "namespace", g.File(), diff --git a/pkg/ruler/manager_test.go b/pkg/ruler/manager_test.go index 9af478b2b4e..8abf3c29366 100644 --- a/pkg/ruler/manager_test.go +++ b/pkg/ruler/manager_test.go @@ -51,7 +51,7 @@ func TestSyncRuleGroups(t *testing.T) { mgr := getManager(m, user) require.NotNil(t, mgr) - test.Poll(t, 1*time.Second, true, func() interface{} { + test.Poll(t, 1*time.Second, true, func() any { return mgr.(*mockRulesManager).running.Load() }) @@ -72,7 +72,7 @@ func TestSyncRuleGroups(t *testing.T) { require.Nil(t, getManager(m, user)) // Make sure old manager was stopped. - test.Poll(t, 1*time.Second, false, func() interface{} { + test.Poll(t, 1*time.Second, false, func() any { return mgr.(*mockRulesManager).running.Load() }) @@ -94,7 +94,7 @@ func TestSyncRuleGroups(t *testing.T) { require.NotNil(t, newMgr) require.True(t, mgr != newMgr) - test.Poll(t, 1*time.Second, true, func() interface{} { + test.Poll(t, 1*time.Second, true, func() any { return newMgr.(*mockRulesManager).running.Load() }) @@ -107,7 +107,7 @@ func TestSyncRuleGroups(t *testing.T) { m.Stop() - test.Poll(t, 1*time.Second, false, func() interface{} { + test.Poll(t, 1*time.Second, false, func() any { return newMgr.(*mockRulesManager).running.Load() }) } @@ -167,7 +167,7 @@ func TestSlowRuleGroupSyncDoesNotSlowdownListRules(t *testing.T) { mgr := getManager(m, user) require.NotNil(t, mgr) - test.Poll(t, 1*time.Second, true, func() interface{} { + test.Poll(t, 1*time.Second, true, func() any { return mgr.(*mockRulesManager).running.Load() }) groups := m.GetRules(user) @@ -195,18 +195,18 @@ func TestSlowRuleGroupSyncDoesNotSlowdownListRules(t *testing.T) { groups = m.GetRules(user) require.Len(t, groups, len(groupsToReturn[0]), "expected %d but got %d", len(groupsToReturn[0]), len(groups)) - test.Poll(t, 5*time.Second, len(groupsToReturn[1]), func() interface{} { + test.Poll(t, 5*time.Second, len(groupsToReturn[1]), func() any { groups = m.GetRules(user) return len(groups) }) - test.Poll(t, 1*time.Second, true, func() interface{} { + test.Poll(t, 1*time.Second, true, func() any { return mgr.(*mockRulesManager).running.Load() }) m.Stop() - test.Poll(t, 1*time.Second, false, func() interface{} { + test.Poll(t, 1*time.Second, false, func() any { return mgr.(*mockRulesManager).running.Load() }) } diff --git a/pkg/ruler/mapper_test.go b/pkg/ruler/mapper_test.go index 10cb52aa285..d1688955423 100644 --- a/pkg/ruler/mapper_test.go +++ b/pkg/ruler/mapper_test.go @@ -3,6 +3,7 @@ package ruler import ( "net/url" "os" + "slices" "testing" "github.com/go-kit/log" @@ -296,8 +297,8 @@ func Test_mapper_MapRulesMultipleFiles(t *testing.T) { updated, files, err := m.MapRules(testUser, twoFilesRuleSet) require.True(t, updated) require.Len(t, files, 2) - require.True(t, sliceContains(t, fileOnePath, files)) - require.True(t, sliceContains(t, fileTwoPath, files)) + require.True(t, slices.Contains(files, fileOnePath)) + require.True(t, slices.Contains(files, fileTwoPath)) require.NoError(t, err) exists, err := afero.Exists(m.FS, fileOnePath) @@ -312,8 +313,8 @@ func Test_mapper_MapRulesMultipleFiles(t *testing.T) { updated, files, err := m.MapRules(testUser, twoFilesUpdatedRuleSet) require.True(t, updated) require.Len(t, files, 2) - require.True(t, sliceContains(t, fileOnePath, files)) - require.True(t, sliceContains(t, fileTwoPath, 
files)) + require.True(t, slices.Contains(files, fileOnePath)) + require.True(t, slices.Contains(files, fileTwoPath)) require.NoError(t, err) exists, err := afero.Exists(m.FS, fileOnePath) @@ -375,18 +376,6 @@ func Test_mapper_MapRulesSpecialCharNamespace(t *testing.T) { }) } -func sliceContains(t *testing.T, find string, in []string) bool { - t.Helper() - - for _, s := range in { - if find == s { - return true - } - } - - return false -} - func TestYamlFormatting(t *testing.T) { l := log.NewLogfmtLogger(os.Stdout) l = level.NewFilter(l, level.AllowInfo()) diff --git a/pkg/ruler/ruler.go b/pkg/ruler/ruler.go index 7c38c8ab6e5..9b03acae15e 100644 --- a/pkg/ruler/ruler.go +++ b/pkg/ruler/ruler.go @@ -8,6 +8,7 @@ import ( "net/http" "net/url" "path/filepath" + "slices" "sort" "strings" "sync" @@ -180,7 +181,7 @@ type Config struct { // Validate config and returns error on failure func (cfg *Config) Validate(limits validation.Limits, log log.Logger) error { - if !util.StringsContain(supportedShardingStrategies, cfg.ShardingStrategy) { + if !slices.Contains(supportedShardingStrategies, cfg.ShardingStrategy) { return errInvalidShardingStrategy } @@ -200,7 +201,7 @@ func (cfg *Config) Validate(limits validation.Limits, log log.Logger) error { return errInvalidMaxConcurrentEvals } - if !util.StringsContain(supportedQueryResponseFormats, cfg.QueryResponseFormat) { + if !slices.Contains(supportedQueryResponseFormats, cfg.QueryResponseFormat) { return errInvalidQueryResponseFormat } @@ -621,7 +622,7 @@ func (r *Ruler) nonPrimaryInstanceOwnsRuleGroup(g *rulespb.RuleGroupDesc, replic ctx, cancel := context.WithTimeout(ctx, r.cfg.LivenessCheckTimeout) defer cancel() - err := concurrency.ForEach(ctx, jobs, len(jobs), func(ctx context.Context, job interface{}) error { + err := concurrency.ForEach(ctx, jobs, len(jobs), func(ctx context.Context, job any) error { addr := job.(string) rulerClient, err := r.GetClientFor(addr) if err != nil { @@ -910,13 +911,10 @@ func (r *Ruler) listRulesShuffleSharding(ctx context.Context) (map[string]rulesp gLock := sync.Mutex{} ruleGroupCounts := make(map[string]int, len(userRings)) - concurrency := loadRulesConcurrency - if len(userRings) < concurrency { - concurrency = len(userRings) - } + concurrency := min(len(userRings), loadRulesConcurrency) g, gctx := errgroup.WithContext(ctx) - for i := 0; i < concurrency; i++ { + for range concurrency { g.Go(func() error { for userID := range userCh { groups, err := r.store.ListRuleGroupsForUserAndNamespace(gctx, userID, "") @@ -1387,7 +1385,7 @@ func (r *Ruler) getShardedRules(ctx context.Context, userID string, rulesRequest } // Concurrently fetch rules from all rulers. 
jobs := concurrency.CreateJobsFromStrings(rulers.GetAddresses()) - err = concurrency.ForEach(ctx, jobs, len(jobs), func(ctx context.Context, job interface{}) error { + err = concurrency.ForEach(ctx, jobs, len(jobs), func(ctx context.Context, job any) error { addr := job.(string) rulerClient, err := r.clientsPool.GetClientFor(addr) @@ -1533,7 +1531,7 @@ func (r *Ruler) ListAllRules(w http.ResponseWriter, req *http.Request) { } done := make(chan struct{}) - iter := make(chan interface{}) + iter := make(chan any) go func() { util.StreamWriteYAMLV3Response(w, iter, logger) diff --git a/pkg/ruler/ruler_ring.go b/pkg/ruler/ruler_ring.go index da87bede3ff..25249870389 100644 --- a/pkg/ruler/ruler_ring.go +++ b/pkg/ruler/ruler_ring.go @@ -163,10 +163,7 @@ func GetReplicationSetForListRule(r ring.ReadRing, cfg *RingConfig) (ring.Replic return ring.ReplicationSet{}, zoneFailures, ring.ErrTooManyUnhealthyInstances } } else { - numRequired := len(healthy) + len(unhealthy) - if numRequired < r.ReplicationFactor() { - numRequired = r.ReplicationFactor() - } + numRequired := max(len(healthy)+len(unhealthy), r.ReplicationFactor()) // quorum is not required so 1 replica is enough to handle the request numRequired -= r.ReplicationFactor() - 1 if len(healthy) < numRequired { diff --git a/pkg/ruler/ruler_ring_test.go b/pkg/ruler/ruler_ring_test.go index 7dd3cca9a98..3b388e456b5 100644 --- a/pkg/ruler/ruler_ring_test.go +++ b/pkg/ruler/ruler_ring_test.go @@ -262,7 +262,7 @@ func TestGetReplicationSetForListRule(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), rulerRing)) t.Cleanup(rulerRing.StopAsync) - err := kvStore.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := kvStore.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) { d, _ := in.(*ring.Desc) if d == nil { d = ring.NewDesc() diff --git a/pkg/ruler/ruler_test.go b/pkg/ruler/ruler_test.go index c6a6b833b19..755eb49fa37 100644 --- a/pkg/ruler/ruler_test.go +++ b/pkg/ruler/ruler_test.go @@ -6,13 +6,14 @@ import ( "errors" "fmt" "io" + "maps" "math/rand" "net/http" "net/http/httptest" "net/url" "os" "reflect" - "sort" + "slices" "strings" "sync" "testing" @@ -491,7 +492,6 @@ func TestNotifierSendExternalLabels(t *testing.T) { }, } for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { limits.setRulerExternalLabels(labels.New(test.userExternalLabels...)) @@ -1319,7 +1319,7 @@ func TestGetRules(t *testing.T) { } if tc.sharding { - err := kvStore.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := kvStore.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) { d, _ := in.(*ring.Desc) if d == nil { d = ring.NewDesc() @@ -1348,7 +1348,7 @@ func TestGetRules(t *testing.T) { if tc.sharding { // update the State of the rulers in the ring based on tc.rulerStateMap - err := kvStore.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := kvStore.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) { d, _ := in.(*ring.Desc) if d == nil { d = ring.NewDesc() @@ -1551,7 +1551,7 @@ func TestGetRulesFromBackup(t *testing.T) { } } - err := kvStore.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := kvStore.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) { d, _ := 
in.(*ring.Desc) if d == nil { d = ring.NewDesc() @@ -1578,7 +1578,7 @@ func TestGetRulesFromBackup(t *testing.T) { }) // update the State of the rulers in the ring based on tc.rulerStateMap - err = kvStore.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err = kvStore.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) { d, _ := in.(*ring.Desc) if d == nil { d = ring.NewDesc() @@ -1768,7 +1768,7 @@ func getRulesHATest(replicationFactor int) func(t *testing.T) { } } - err := kvStore.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := kvStore.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) { d, _ := in.(*ring.Desc) if d == nil { d = ring.NewDesc() @@ -1795,7 +1795,7 @@ func getRulesHATest(replicationFactor int) func(t *testing.T) { }) // update the State of the rulers in the ring based on tc.rulerStateMap - err = kvStore.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err = kvStore.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) { d, _ := in.(*ring.Desc) if d == nil { d = ring.NewDesc() @@ -2376,7 +2376,7 @@ func TestSharding(t *testing.T) { } if tc.setupRing != nil { - err := kvStore.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := kvStore.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) { d, _ := in.(*ring.Desc) if d == nil { d = ring.NewDesc() @@ -2444,9 +2444,7 @@ func userToken(user string, skip int) uint32 { } func sortTokens(tokens []uint32) []uint32 { - sort.Slice(tokens, func(i, j int) bool { - return tokens[i] < tokens[j] - }) + slices.Sort(tokens) return tokens } @@ -2505,7 +2503,7 @@ func Test_LoadPartialGroups(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), r1)) t.Cleanup(r1.StopAsync) - err := kvStore.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := kvStore.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) { d, _ := in.(*ring.Desc) if d == nil { d = ring.NewDesc() @@ -2516,7 +2514,7 @@ func Test_LoadPartialGroups(t *testing.T) { require.NoError(t, err) - test.Poll(t, time.Second*5, true, func() interface{} { + test.Poll(t, time.Second*5, true, func() any { return len(r1.manager.GetRules(user2)) > 0 && len(r1.manager.GetRules(user3)) > 0 }) @@ -2728,7 +2726,6 @@ func TestSendAlerts(t *testing.T) { } for i, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { senderFunc := senderFunc(func(alerts ...*notifier.Alert) { if len(tc.in) == 0 { @@ -3053,7 +3050,7 @@ func TestRulerDisablesRuleGroups(t *testing.T) { } if tc.setupRing != nil { - err := kvStore.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := kvStore.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) { d, _ := in.(*ring.Desc) if d == nil { d = ring.NewDesc() @@ -3087,9 +3084,7 @@ func TestRulerDisablesRuleGroups(t *testing.T) { if loaded == nil { loaded = map[string]rulespb.RuleGroupList{} } - for k, v := range loaded { - actualRules[k] = v - } + maps.Copy(actualRules, loaded) } } @@ -3242,7 +3237,7 @@ func TestGetShardSizeForUser(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), r.lifecycler)) } - err 
:= kvStore.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := kvStore.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) { d, _ := in.(*ring.Desc) if d == nil { d = ring.NewDesc() diff --git a/pkg/ruler/rulestore/bucketclient/bucket_client.go b/pkg/ruler/rulestore/bucketclient/bucket_client.go index 049524f5005..8515ad910a7 100644 --- a/pkg/ruler/rulestore/bucketclient/bucket_client.go +++ b/pkg/ruler/rulestore/bucketclient/bucket_client.go @@ -182,7 +182,7 @@ func (b *BucketRuleStore) LoadRuleGroups(ctx context.Context, groupsToLoad map[s // download all rule groups in parallel. We limit the number of workers to avoid a // particular user having too many rule groups rate limiting us with the object storage. g, gCtx := errgroup.WithContext(ctx) - for i := 0; i < loadConcurrency; i++ { + for range loadConcurrency { g.Go(func() error { for gr := range ch { user, namespace, group := gr.GetUser(), gr.GetNamespace(), gr.GetName() diff --git a/pkg/ruler/rulestore/bucketclient/bucket_client_test.go b/pkg/ruler/rulestore/bucketclient/bucket_client_test.go index 460a8821180..0afa4b155cb 100644 --- a/pkg/ruler/rulestore/bucketclient/bucket_client_test.go +++ b/pkg/ruler/rulestore/bucketclient/bucket_client_test.go @@ -28,7 +28,7 @@ type testGroup struct { } func TestListRules(t *testing.T) { - runForEachRuleStore(t, func(t *testing.T, rs rulestore.RuleStore, _ interface{}) { + runForEachRuleStore(t, func(t *testing.T, rs rulestore.RuleStore, _ any) { groups := []testGroup{ {user: "user1", namespace: "hello", ruleGroup: rulefmt.RuleGroup{Name: "first testGroup"}}, {user: "user1", namespace: "hello", ruleGroup: rulefmt.RuleGroup{Name: "second testGroup"}}, @@ -132,7 +132,7 @@ func TestLoadPartialRules(t *testing.T) { } func TestLoadRules(t *testing.T) { - runForEachRuleStore(t, func(t *testing.T, rs rulestore.RuleStore, _ interface{}) { + runForEachRuleStore(t, func(t *testing.T, rs rulestore.RuleStore, _ any) { groups := []testGroup{ {user: "user1", namespace: "hello", ruleGroup: rulefmt.RuleGroup{Name: "first testGroup", Interval: model.Duration(time.Minute), Rules: []rulefmt.Rule{{ For: model.Duration(5 * time.Minute), @@ -201,7 +201,7 @@ func TestLoadRules(t *testing.T) { } func TestDelete(t *testing.T) { - runForEachRuleStore(t, func(t *testing.T, rs rulestore.RuleStore, bucketClient interface{}) { + runForEachRuleStore(t, func(t *testing.T, rs rulestore.RuleStore, bucketClient any) { groups := []testGroup{ {user: "user1", namespace: "A", ruleGroup: rulefmt.RuleGroup{Name: "1"}}, {user: "user1", namespace: "A", ruleGroup: rulefmt.RuleGroup{Name: "2"}}, @@ -261,13 +261,13 @@ func TestDelete(t *testing.T) { }) } -func runForEachRuleStore(t *testing.T, testFn func(t *testing.T, store rulestore.RuleStore, bucketClient interface{})) { +func runForEachRuleStore(t *testing.T, testFn func(t *testing.T, store rulestore.RuleStore, bucketClient any)) { bucketClient := objstore.NewInMemBucket() bucketStore := NewBucketRuleStore(bucketClient, nil, log.NewNopLogger()) stores := map[string]struct { store rulestore.RuleStore - client interface{} + client any }{ "bucket": {store: bucketStore, client: bucketClient}, } @@ -279,7 +279,7 @@ func runForEachRuleStore(t *testing.T, testFn func(t *testing.T, store rulestore } } -func getSortedObjectKeys(bucketClient interface{}) []string { +func getSortedObjectKeys(bucketClient any) []string { if typed, ok := bucketClient.(*objstore.InMemBucket); ok { var keys []string for key := range 
typed.Objects() { diff --git a/pkg/scheduler/queue/queue_test.go b/pkg/scheduler/queue/queue_test.go index 98a1ea05823..36aa97c98a7 100644 --- a/pkg/scheduler/queue/queue_test.go +++ b/pkg/scheduler/queue/queue_test.go @@ -23,7 +23,7 @@ func BenchmarkGetNextRequest(b *testing.B) { queues := make([]*RequestQueue, 0, b.N) - for n := 0; n < b.N; n++ { + for b.Loop() { queue := NewRequestQueue(maxOutstandingPerTenant, prometheus.NewGaugeVec(prometheus.GaugeOpts{}, []string{"user", "priority", "type"}), prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user", "priority"}), @@ -32,12 +32,12 @@ func BenchmarkGetNextRequest(b *testing.B) { ) queues = append(queues, queue) - for ix := 0; ix < queriers; ix++ { + for ix := range queriers { queue.RegisterQuerierConnection(fmt.Sprintf("querier-%d", ix)) } - for i := 0; i < maxOutstandingPerTenant; i++ { - for j := 0; j < numTenants; j++ { + for range maxOutstandingPerTenant { + for j := range numTenants { userID := strconv.Itoa(j) err := queue.EnqueueRequest(userID, MockRequest{}, 0, nil) @@ -49,11 +49,10 @@ func BenchmarkGetNextRequest(b *testing.B) { } ctx := context.Background() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { idx := FirstUser() - for j := 0; j < maxOutstandingPerTenant*numTenants; j++ { + for range maxOutstandingPerTenant * numTenants { querier := "" b: // Find querier with at least one request to avoid blocking in getNextRequestForQuerier. @@ -82,7 +81,7 @@ func BenchmarkQueueRequest(b *testing.B) { users := make([]string, 0, numTenants) requests := make([]MockRequest, 0, numTenants) - for n := 0; n < b.N; n++ { + for n := 0; b.Loop(); n++ { q := NewRequestQueue(maxOutstandingPerTenant, prometheus.NewGaugeVec(prometheus.GaugeOpts{}, []string{"user", "priority", "type"}), prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user", "priority"}), @@ -90,22 +89,21 @@ func BenchmarkQueueRequest(b *testing.B) { nil, ) - for ix := 0; ix < queriers; ix++ { + for ix := range queriers { q.RegisterQuerierConnection(fmt.Sprintf("querier-%d", ix)) } queues = append(queues, q) - for j := 0; j < numTenants; j++ { + for j := range numTenants { requests = append(requests, MockRequest{id: fmt.Sprintf("%d-%d", n, j)}) users = append(users, strconv.Itoa(j)) } } - b.ResetTimer() - for n := 0; n < b.N; n++ { - for i := 0; i < maxOutstandingPerTenant; i++ { - for j := 0; j < numTenants; j++ { + for n := 0; b.Loop(); n++ { + for range maxOutstandingPerTenant { + for j := range numTenants { err := queues[n].EnqueueRequest(users[j], requests[j], 0, nil) if err != nil { b.Fatal(err) @@ -122,7 +120,7 @@ func BenchmarkGetNextRequestPriorityQueue(b *testing.B) { queues := make([]*RequestQueue, 0, b.N) - for n := 0; n < b.N; n++ { + for b.Loop() { queue := NewRequestQueue(maxOutstandingPerTenant, prometheus.NewGaugeVec(prometheus.GaugeOpts{}, []string{"user", "priority", "type"}), prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user", "priority"}), @@ -131,12 +129,12 @@ func BenchmarkGetNextRequestPriorityQueue(b *testing.B) { ) queues = append(queues, queue) - for ix := 0; ix < queriers; ix++ { + for ix := range queriers { queue.RegisterQuerierConnection(fmt.Sprintf("querier-%d", ix)) } - for i := 0; i < maxOutstandingPerTenant; i++ { - for j := 0; j < numTenants; j++ { + for i := range maxOutstandingPerTenant { + for j := range numTenants { userID := strconv.Itoa(j) err := queue.EnqueueRequest(userID, MockRequest{priority: int64(i)}, 0, nil) @@ -148,11 +146,10 @@ func 
BenchmarkGetNextRequestPriorityQueue(b *testing.B) { } ctx := context.Background() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { idx := FirstUser() - for j := 0; j < maxOutstandingPerTenant*numTenants; j++ { + for range maxOutstandingPerTenant * numTenants { querier := "" b: // Find querier with at least one request to avoid blocking in getNextRequestForQuerier. @@ -181,7 +178,7 @@ func BenchmarkQueueRequestPriorityQueue(b *testing.B) { users := make([]string, 0, numTenants) requests := make([]MockRequest, 0, numTenants) - for n := 0; n < b.N; n++ { + for n := 0; b.Loop(); n++ { q := NewRequestQueue(maxOutstandingPerTenant, prometheus.NewGaugeVec(prometheus.GaugeOpts{}, []string{"user", "priority", "type"}), prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user", "priority"}), @@ -189,22 +186,21 @@ func BenchmarkQueueRequestPriorityQueue(b *testing.B) { nil, ) - for ix := 0; ix < queriers; ix++ { + for ix := range queriers { q.RegisterQuerierConnection(fmt.Sprintf("querier-%d", ix)) } queues = append(queues, q) - for j := 0; j < numTenants; j++ { + for j := range numTenants { requests = append(requests, MockRequest{id: fmt.Sprintf("%d-%d", n, j), priority: int64(j)}) users = append(users, strconv.Itoa(j)) } } - b.ResetTimer() - for n := 0; n < b.N; n++ { - for i := 0; i < maxOutstandingPerTenant; i++ { - for j := 0; j < numTenants; j++ { + for n := 0; b.Loop(); n++ { + for range maxOutstandingPerTenant { + for j := range numTenants { err := queues[n].EnqueueRequest(users[j], requests[j], 0, nil) if err != nil { b.Fatal(err) diff --git a/pkg/scheduler/queue/user_queues.go b/pkg/scheduler/queue/user_queues.go index ee1ce9e8047..6a9c24b2c20 100644 --- a/pkg/scheduler/queue/user_queues.go +++ b/pkg/scheduler/queue/user_queues.go @@ -374,7 +374,7 @@ func shuffleQueriersForUser(userSeed int64, queriersToSelect int, allSortedQueri scratchpad = append(scratchpad, allSortedQueriers...) last := len(scratchpad) - 1 - for i := 0; i < queriersToSelect; i++ { + for range queriersToSelect { r := rnd.Intn(last + 1) queriers[scratchpad[r]] = struct{}{} scratchpad[r], scratchpad[last] = scratchpad[last], scratchpad[r] @@ -393,7 +393,7 @@ func getPriorityList(queryPriority validation.QueryPriority, totalQuerierCount i for _, priority := range queryPriority.Priorities { reservedQuerierShardSize := util.DynamicShardSize(priority.ReservedQueriers, totalQuerierCount) - for i := 0; i < reservedQuerierShardSize; i++ { + for range reservedQuerierShardSize { priorityList = append(priorityList, priority.Priority) } } diff --git a/pkg/scheduler/queue/user_queues_test.go b/pkg/scheduler/queue/user_queues_test.go index 0c242eafa7c..f70287d51a4 100644 --- a/pkg/scheduler/queue/user_queues_test.go +++ b/pkg/scheduler/queue/user_queues_test.go @@ -80,7 +80,7 @@ func TestQueuesWithQueriers(t *testing.T) { maxQueriersPerUser := 5 // Add some queriers. - for ix := 0; ix < queriers; ix++ { + for ix := range queriers { qid := fmt.Sprintf("querier-%d", ix) uq.addQuerierConnection(qid) @@ -93,7 +93,7 @@ func TestQueuesWithQueriers(t *testing.T) { assert.NoError(t, isConsistent(uq)) // Add user queues. - for u := 0; u < users; u++ { + for u := range users { uid := fmt.Sprintf("user-%d", u) getOrAdd(t, uq, uid, maxQueriersPerUser) @@ -106,7 +106,7 @@ func TestQueuesWithQueriers(t *testing.T) { // and compute mean and stdDev. 
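// Editor's aside (illustrative only, not part of the patch): the benchmark hunks above adopt
// testing.B.Loop, added in Go 1.24, which replaces the classic `b.ResetTimer()` plus
// `for i := 0; i < b.N; i++` idiom; setup done before the first Loop call is excluded from the
// measured time automatically. A minimal sketch of a benchmark in the new style, with a
// hypothetical workload:
package example

import (
	"strconv"
	"testing"
)

func BenchmarkItoa(b *testing.B) {
	inputs := make([]int, 0, 1024) // setup, not timed once Loop starts
	for i := range 1024 {
		inputs = append(inputs, i*i)
	}

	n := 0
	for b.Loop() { // replaces: b.ResetTimer(); for i := 0; i < b.N; i++ { ... }
		for _, v := range inputs {
			n += len(strconv.Itoa(v))
		}
	}
	_ = n
}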
queriersMap := make(map[string]int) - for q := 0; q < queriers; q++ { + for q := range queriers { qid := fmt.Sprintf("querier-%d", q) lastUserIndex := -1 @@ -158,7 +158,7 @@ func TestQueuesConsistency(t *testing.T) { conns := map[string]int{} - for i := 0; i < 10000; i++ { + for i := range 10000 { switch r.Int() % 6 { case 0: assert.NotNil(t, uq.getOrAddQueue(generateTenant(r), 3)) @@ -208,7 +208,7 @@ func TestQueues_ForgetDelay(t *testing.T) { } // Add user queues. - for i := 0; i < numUsers; i++ { + for i := range numUsers { userID := fmt.Sprintf("user-%d", i) getOrAdd(t, uq, userID, maxQueriersPerUser) } @@ -300,7 +300,7 @@ func TestQueues_ForgetDelay_ShouldCorrectlyHandleQuerierReconnectingBeforeForget } // Add user queues. - for i := 0; i < numUsers; i++ { + for i := range numUsers { userID := fmt.Sprintf("user-%d", i) getOrAdd(t, uq, userID, maxQueriersPerUser) } @@ -446,7 +446,7 @@ func TestGetOrAddQueueShouldUpdateProperties(t *testing.T) { assert.IsType(t, &FIFORequestQueue{}, queue) // check the queriers and reservedQueriers map are consistent - for i := 0; i < 100; i++ { + for range 100 { queriers := q.userQueues["userID"].queriers reservedQueriers := q.userQueues["userID"].reservedQueriers q.userQueues["userID"].maxQueriers = 0 // reset to trigger querier assignment @@ -473,7 +473,7 @@ func TestQueueConcurrency(t *testing.T) { var wg sync.WaitGroup wg.Add(numGoRoutines) - for i := 0; i < numGoRoutines; i++ { + for i := range numGoRoutines { go func(cnt int) { defer wg.Done() queue := q.getOrAddQueue("userID", 2) @@ -577,7 +577,7 @@ func getUsersByQuerier(queues *queues, querierID string) []string { return userIDs } -func getKeys(x interface{}) []string { +func getKeys(x any) []string { var keys []string switch i := x.(type) { @@ -620,14 +620,14 @@ func TestShuffleQueriersCorrectness(t *testing.T) { const queriersCount = 100 var allSortedQueriers []string - for i := 0; i < queriersCount; i++ { + for i := range queriersCount { allSortedQueriers = append(allSortedQueriers, fmt.Sprintf("%d", i)) } sort.Strings(allSortedQueriers) r := rand.New(rand.NewSource(time.Now().UnixNano())) const tests = 1000 - for i := 0; i < tests; i++ { + for range tests { toSelect := r.Intn(queriersCount) if toSelect == 0 { toSelect = 3 diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go index 9c1d75ad51a..d1f8e8c3fec 100644 --- a/pkg/scheduler/scheduler_test.go +++ b/pkg/scheduler/scheduler_test.go @@ -179,7 +179,7 @@ func TestSchedulerEnqueueWithFrontendDisconnect(t *testing.T) { }) // Wait until the frontend has connected to the scheduler. - test.Poll(t, time.Second, float64(1), func() interface{} { + test.Poll(t, time.Second, float64(1), func() any { return promtest.ToFloat64(scheduler.connectedFrontendClients) }) @@ -187,7 +187,7 @@ func TestSchedulerEnqueueWithFrontendDisconnect(t *testing.T) { require.NoError(t, frontendLoop.CloseSend()) // Wait until the frontend has disconnected. 
- test.Poll(t, time.Second, float64(0), func() interface{} { + test.Poll(t, time.Second, float64(0), func() any { return promtest.ToFloat64(scheduler.connectedFrontendClients) }) @@ -317,7 +317,7 @@ func TestSchedulerShutdown_QuerierLoop(t *testing.T) { func TestSchedulerMaxOutstandingRequests(t *testing.T) { _, frontendClient, _ := setupScheduler(t, nil) - for i := 0; i < testMaxOutstandingPerTenant; i++ { + for i := range testMaxOutstandingPerTenant { // coming from different frontends fl := initFrontendLoop(t, frontendClient, fmt.Sprintf("frontend-%d", i)) require.NoError(t, fl.Send(&schedulerpb.FrontendToScheduler{ @@ -395,7 +395,7 @@ func TestSchedulerForwardsErrorToFrontend(t *testing.T) { require.NoError(t, querierLoop.CloseSend()) // Verify that frontend was notified about request. - test.Poll(t, 2*time.Second, true, func() interface{} { + test.Poll(t, 2*time.Second, true, func() any { resp := fm.getRequest(100) if resp == nil { return false @@ -474,7 +474,7 @@ func frontendToScheduler(t *testing.T, frontendLoop schedulerpb.SchedulerForFron // If this verification succeeds, there will be leaked goroutine left behind. It will be cleaned once grpc server is shut down. func verifyQuerierDoesntReceiveRequest(t *testing.T, querierLoop schedulerpb.SchedulerForQuerier_QuerierLoopClient, timeout time.Duration) { - ch := make(chan interface{}, 1) + ch := make(chan any, 1) go func() { m, e := querierLoop.Recv() @@ -494,7 +494,7 @@ func verifyQuerierDoesntReceiveRequest(t *testing.T, querierLoop schedulerpb.Sch } func verifyNoPendingRequestsLeft(t *testing.T, scheduler *Scheduler) { - test.Poll(t, 1*time.Second, 0, func() interface{} { + test.Poll(t, 1*time.Second, 0, func() any { scheduler.pendingRequestsMu.Lock() defer scheduler.pendingRequestsMu.Unlock() return len(scheduler.pendingRequests) diff --git a/pkg/storage/bucket/azure/config_test.go b/pkg/storage/bucket/azure/config_test.go index 8ae29ec4507..5cdf00fb9ac 100644 --- a/pkg/storage/bucket/azure/config_test.go +++ b/pkg/storage/bucket/azure/config_test.go @@ -85,7 +85,6 @@ http: } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { cfg := Config{} diff --git a/pkg/storage/bucket/bucket_util.go b/pkg/storage/bucket/bucket_util.go index 6b48a5ffb28..c068d086952 100644 --- a/pkg/storage/bucket/bucket_util.go +++ b/pkg/storage/bucket/bucket_util.go @@ -22,7 +22,7 @@ func DeletePrefix(ctx context.Context, bkt objstore.Bucket, prefix string, logge } result := atomic.NewInt32(0) - err = concurrency.ForEach(ctx, concurrency.CreateJobsFromStrings(keys), maxConcurrency, func(ctx context.Context, key interface{}) error { + err = concurrency.ForEach(ctx, concurrency.CreateJobsFromStrings(keys), maxConcurrency, func(ctx context.Context, key any) error { name := key.(string) if err := bkt.Delete(ctx, name); err != nil { return err diff --git a/pkg/storage/bucket/bucket_util_test.go b/pkg/storage/bucket/bucket_util_test.go index 3a5c4ab7213..f48184b7d4f 100644 --- a/pkg/storage/bucket/bucket_util_test.go +++ b/pkg/storage/bucket/bucket_util_test.go @@ -38,7 +38,7 @@ func TestDeletePrefixConcurrent(t *testing.T) { require.NoError(t, mem.Upload(context.Background(), "prefix/sub2/4", strings.NewReader("hello"))) require.NoError(t, mem.Upload(context.Background(), "outside/obj", strings.NewReader("hello"))) n := 10000 - for i := 0; i < n; i++ { + for i := range n { require.NoError(t, mem.Upload(context.Background(), fmt.Sprintf("prefix/sub/%d", i), strings.NewReader(fmt.Sprintf("hello%d", i)))) } diff 
--git a/pkg/storage/bucket/client.go b/pkg/storage/bucket/client.go index b7dc57f9f29..e13a49593c0 100644 --- a/pkg/storage/bucket/client.go +++ b/pkg/storage/bucket/client.go @@ -6,6 +6,7 @@ import ( "flag" "fmt" "net/http" + "slices" "strings" "github.com/go-kit/log" @@ -18,7 +19,6 @@ import ( "github.com/cortexproject/cortex/pkg/storage/bucket/gcs" "github.com/cortexproject/cortex/pkg/storage/bucket/s3" "github.com/cortexproject/cortex/pkg/storage/bucket/swift" - "github.com/cortexproject/cortex/pkg/util" ) const ( @@ -90,7 +90,7 @@ func (cfg *Config) RegisterFlagsWithPrefixAndBackend(prefix string, f *flag.Flag } func (cfg *Config) Validate() error { - if !util.StringsContain(cfg.supportedBackends(), cfg.Backend) { + if !slices.Contains(cfg.supportedBackends(), cfg.Backend) { return ErrUnsupportedStorageBackend } diff --git a/pkg/storage/bucket/client_test.go b/pkg/storage/bucket/client_test.go index 78b2ea3db20..f58312b44ff 100644 --- a/pkg/storage/bucket/client_test.go +++ b/pkg/storage/bucket/client_test.go @@ -76,7 +76,6 @@ func TestNewClient(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { // Load config @@ -145,7 +144,7 @@ func TestClientMock_MockGet(t *testing.T) { // Run many goroutines all requesting the same mocked object and // ensure there's no race. wg := sync.WaitGroup{} - for i := 0; i < 1000; i++ { + for range 1000 { wg.Add(1) go func() { defer wg.Done() diff --git a/pkg/storage/bucket/http/config_test.go b/pkg/storage/bucket/http/config_test.go index 2203a52acb3..3594dcd7521 100644 --- a/pkg/storage/bucket/http/config_test.go +++ b/pkg/storage/bucket/http/config_test.go @@ -66,7 +66,6 @@ max_connections_per_host: 8 } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { cfg := Config{} diff --git a/pkg/storage/bucket/s3/config.go b/pkg/storage/bucket/s3/config.go index df5bd33ab29..f5778f3435d 100644 --- a/pkg/storage/bucket/s3/config.go +++ b/pkg/storage/bucket/s3/config.go @@ -5,6 +5,7 @@ import ( "flag" "fmt" "net/http" + "slices" "strings" "github.com/minio/minio-go/v7/pkg/encrypt" @@ -12,7 +13,6 @@ import ( "github.com/thanos-io/objstore/providers/s3" bucket_http "github.com/cortexproject/cortex/pkg/storage/bucket/http" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" ) @@ -103,14 +103,14 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { // Validate config and returns error on failure func (cfg *Config) Validate() error { - if !util.StringsContain(supportedSignatureVersions, cfg.SignatureVersion) { + if !slices.Contains(supportedSignatureVersions, cfg.SignatureVersion) { return errUnsupportedSignatureVersion } - if !util.StringsContain(supportedBucketLookupTypes, cfg.BucketLookupType) { + if !slices.Contains(supportedBucketLookupTypes, cfg.BucketLookupType) { return errInvalidBucketLookupType } if cfg.ListObjectsVersion != "" { - if !util.StringsContain(supportedListObjectsVersion, cfg.ListObjectsVersion) { + if !slices.Contains(supportedListObjectsVersion, cfg.ListObjectsVersion) { return errInvalidListObjectsVersion } } @@ -155,7 +155,7 @@ func (cfg *SSEConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { } func (cfg *SSEConfig) Validate() error { - if cfg.Type != "" && !util.StringsContain(supportedSSETypes, cfg.Type) { + if cfg.Type != "" && !slices.Contains(supportedSSETypes, cfg.Type) { return errUnsupportedSSEType } diff --git 
a/pkg/storage/bucket/s3/config_test.go b/pkg/storage/bucket/s3/config_test.go index a01a8a07b7e..122f1eeac8d 100644 --- a/pkg/storage/bucket/s3/config_test.go +++ b/pkg/storage/bucket/s3/config_test.go @@ -110,7 +110,6 @@ http: } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { cfg := Config{} diff --git a/pkg/storage/tsdb/bucketindex/loader_test.go b/pkg/storage/tsdb/bucketindex/loader_test.go index 088e97818a4..482fc43a6c6 100644 --- a/pkg/storage/tsdb/bucketindex/loader_test.go +++ b/pkg/storage/tsdb/bucketindex/loader_test.go @@ -64,7 +64,7 @@ func TestLoader_GetIndex_ShouldLazyLoadBucketIndex(t *testing.T) { )) // Request the index multiple times. - for i := 0; i < 10; i++ { + for range 10 { actualIdx, _, err := loader.GetIndex(ctx, "user-1") require.NoError(t, err) assert.Equal(t, idx, actualIdx) @@ -104,7 +104,7 @@ func TestLoader_GetIndex_ShouldCacheError(t *testing.T) { require.NoError(t, bkt.Upload(ctx, path.Join("user-1", IndexCompressedFilename), strings.NewReader("invalid!}"))) // Request the index multiple times. - for i := 0; i < 10; i++ { + for range 10 { _, _, err := loader.GetIndex(ctx, "user-1") require.Equal(t, ErrIndexCorrupted, err) } @@ -140,7 +140,7 @@ func TestLoader_GetIndex_ShouldCacheIndexNotFoundError(t *testing.T) { }) // Request the index multiple times. - for i := 0; i < 10; i++ { + for range 10 { _, _, err := loader.GetIndex(ctx, "user-1") require.Equal(t, ErrIndexNotFound, err) } @@ -242,7 +242,7 @@ func TestLoader_ShouldUpdateIndexInBackgroundOnPreviousLoadSuccess(t *testing.T) require.NoError(t, WriteIndex(ctx, bkt, "user-1", nil, idx)) // Wait until the index has been updated in background. - test.Poll(t, 3*time.Second, 2, func() interface{} { + test.Poll(t, 3*time.Second, 2, func() any { actualIdx, _, err := loader.GetIndex(ctx, "user-1") if err != nil { return 0 @@ -305,7 +305,7 @@ func TestLoader_ShouldUpdateIndexInBackgroundOnPreviousLoadFailure(t *testing.T) require.NoError(t, WriteIndex(ctx, bkt, "user-1", nil, idx)) // Wait until the index has been updated in background. - test.Poll(t, 3*time.Second, nil, func() interface{} { + test.Poll(t, 3*time.Second, nil, func() any { _, _, err := loader.GetIndex(ctx, "user-1") return err }) @@ -358,7 +358,7 @@ func TestLoader_ShouldUpdateIndexInBackgroundOnPreviousIndexNotFound(t *testing. require.NoError(t, WriteIndex(ctx, bkt, "user-1", nil, idx)) // Wait until the index has been updated in background. - test.Poll(t, 3*time.Second, nil, func() interface{} { + test.Poll(t, 3*time.Second, nil, func() any { _, _, err := loader.GetIndex(ctx, "user-1") return err }) @@ -415,7 +415,7 @@ func TestLoader_ShouldNotCacheCriticalErrorOnBackgroundUpdates(t *testing.T) { require.NoError(t, bkt.Upload(ctx, path.Join("user-1", IndexCompressedFilename), strings.NewReader("invalid!}"))) // Wait until the first failure has been tracked. - test.Poll(t, 3*time.Second, true, func() interface{} { + test.Poll(t, 3*time.Second, true, func() any { return testutil.ToFloat64(loader.loadFailures) > 0 }) @@ -472,7 +472,7 @@ func TestLoader_ShouldCacheIndexNotFoundOnBackgroundUpdates(t *testing.T) { // Wait until the next index load attempt occurs. 
prevLoads := testutil.ToFloat64(loader.loadAttempts) - test.Poll(t, 3*time.Second, true, func() interface{} { + test.Poll(t, 3*time.Second, true, func() any { return testutil.ToFloat64(loader.loadAttempts) > prevLoads }) @@ -531,7 +531,7 @@ func TestLoader_ShouldOffloadIndexIfNotFoundDuringBackgroundUpdates(t *testing.T require.NoError(t, DeleteIndex(ctx, bkt, "user-1", nil)) // Wait until the index is offloaded. - test.Poll(t, 3*time.Second, float64(0), func() interface{} { + test.Poll(t, 3*time.Second, float64(0), func() any { return testutil.ToFloat64(loader.loaded) }) @@ -583,7 +583,7 @@ func TestLoader_ShouldOffloadIndexIfIdleTimeoutIsReachedDuringBackgroundUpdates( assert.Equal(t, idx, actualIdx) // Wait until the index is offloaded. - test.Poll(t, 3*time.Second, float64(0), func() interface{} { + test.Poll(t, 3*time.Second, float64(0), func() any { return testutil.ToFloat64(loader.loaded) }) @@ -679,7 +679,7 @@ func TestLoader_ShouldUpdateIndexInBackgroundOnPreviousKeyAccessDenied(t *testin require.NoError(t, WriteIndex(ctx, bkt, "user-1", nil, idx)) // Wait until the index has been updated in background. - test.Poll(t, 3*time.Second, nil, func() interface{} { + test.Poll(t, 3*time.Second, nil, func() any { _, _, err := loader.GetIndex(ctx, "user-1") // Check cached require.NoError(t, loader.checkCachedIndexes(ctx)) @@ -724,7 +724,7 @@ func TestLoader_GetIndex_ShouldCacheKeyDeniedErrors(t *testing.T) { }) // Request the index multiple times. - for i := 0; i < 10; i++ { + for range 10 { _, _, err := loader.GetIndex(ctx, "user-1") require.True(t, errors.Is(err, bucket.ErrCustomerManagedKeyAccessDenied)) } diff --git a/pkg/storage/tsdb/bucketindex/storage_test.go b/pkg/storage/tsdb/bucketindex/storage_test.go index e10d910e088..55f31672e80 100644 --- a/pkg/storage/tsdb/bucketindex/storage_test.go +++ b/pkg/storage/tsdb/bucketindex/storage_test.go @@ -115,7 +115,7 @@ func BenchmarkReadIndex(b *testing.B) { // Mock some blocks and deletion marks in the storage. bkt = BucketWithGlobalMarkers(bkt) - for i := 0; i < numBlocks; i++ { + for i := range numBlocks { minT := int64(i * 10) maxT := int64((i + 1) * 10) @@ -138,9 +138,7 @@ func BenchmarkReadIndex(b *testing.B) { require.Len(b, idx.Blocks, numBlocks) require.Len(b, idx.BlockDeletionMarks, numBlockDeletionMarks) - b.ResetTimer() - - for n := 0; n < b.N; n++ { + for b.Loop() { _, err := ReadIndex(ctx, bkt, userID, nil, logger) require.NoError(b, err) } diff --git a/pkg/storage/tsdb/caching_bucket.go b/pkg/storage/tsdb/caching_bucket.go index 2efa11cff80..404438033aa 100644 --- a/pkg/storage/tsdb/caching_bucket.go +++ b/pkg/storage/tsdb/caching_bucket.go @@ -5,6 +5,7 @@ import ( "fmt" "path/filepath" "regexp" + "slices" "strings" "time" @@ -21,8 +22,6 @@ import ( "github.com/thanos-io/thanos/pkg/cacheutil" "github.com/thanos-io/thanos/pkg/model" storecache "github.com/thanos-io/thanos/pkg/store/cache" - - "github.com/cortexproject/cortex/pkg/util" ) var ( @@ -62,7 +61,7 @@ func (cfg *BucketCacheBackend) Validate() error { } for _, backend := range splitBackends { - if !util.StringsContain(supportedBucketCacheBackends, backend) { + if !slices.Contains(supportedBucketCacheBackends, backend) { return errUnsupportedBucketCacheBackend } @@ -132,10 +131,7 @@ func (cfg *InMemoryBucketCacheConfig) toInMemoryCacheConfig() cache.InMemoryCach maxCacheSize := model.Bytes(cfg.MaxSizeBytes) // Calculate the max item size. 
- maxItemSize := defaultMaxItemSize - if maxItemSize > maxCacheSize { - maxItemSize = maxCacheSize - } + maxItemSize := min(defaultMaxItemSize, maxCacheSize) return cache.InMemoryCacheConfig{ MaxSize: maxCacheSize, diff --git a/pkg/storage/tsdb/config.go b/pkg/storage/tsdb/config.go index cc1be08b13b..b51ad077bd4 100644 --- a/pkg/storage/tsdb/config.go +++ b/pkg/storage/tsdb/config.go @@ -4,6 +4,7 @@ import ( "flag" "fmt" "path/filepath" + "slices" "strings" "time" @@ -15,7 +16,6 @@ import ( "github.com/thanos-io/thanos/pkg/store" "github.com/cortexproject/cortex/pkg/storage/bucket" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" util_log "github.com/cortexproject/cortex/pkg/util/log" ) @@ -409,10 +409,10 @@ func (cfg *BucketStoreConfig) Validate() error { if err != nil { return errors.Wrap(err, "parquet-labels-cache configuration") } - if !util.StringsContain(supportedBlockDiscoveryStrategies, cfg.BlockDiscoveryStrategy) { + if !slices.Contains(supportedBlockDiscoveryStrategies, cfg.BlockDiscoveryStrategy) { return ErrInvalidBucketIndexBlockDiscoveryStrategy } - if !util.StringsContain(supportedTokenBucketBytesLimiterModes, cfg.TokenBucketBytesLimiter.Mode) { + if !slices.Contains(supportedTokenBucketBytesLimiterModes, cfg.TokenBucketBytesLimiter.Mode) { return ErrInvalidTokenBucketBytesLimiterMode } if cfg.LazyExpandedPostingGroupMaxKeySeriesRatio < 0 { diff --git a/pkg/storage/tsdb/config_test.go b/pkg/storage/tsdb/config_test.go index 35f8284d490..7a642cc6006 100644 --- a/pkg/storage/tsdb/config_test.go +++ b/pkg/storage/tsdb/config_test.go @@ -148,7 +148,6 @@ func TestConfig_Validate(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { cfg := &BlocksStorageConfig{} diff --git a/pkg/storage/tsdb/expanded_postings_cache_test.go b/pkg/storage/tsdb/expanded_postings_cache_test.go index 333396b52b7..abe04474020 100644 --- a/pkg/storage/tsdb/expanded_postings_cache_test.go +++ b/pkg/storage/tsdb/expanded_postings_cache_test.go @@ -64,7 +64,7 @@ func Test_ShouldFetchPromiseOnlyOnce(t *testing.T) { return 0, 0, nil } - for i := 0; i < 100; i++ { + for range 100 { go func() { defer wg.Done() cache.getPromiseForKey("key1", fetchFunc) @@ -126,7 +126,7 @@ func TestFifoCacheExpire(t *testing.T) { timeNow := time.Now cache := newFifoCache[int](c.cfg, "test", m, timeNow) - for i := 0; i < numberOfKeys; i++ { + for i := range numberOfKeys { key := RepeatStringIfNeeded(fmt.Sprintf("key%d", i), keySize) p, loaded := cache.getPromiseForKey(key, func() (int, int64, error) { return 1, 8, nil @@ -143,7 +143,7 @@ func TestFifoCacheExpire(t *testing.T) { totalCacheSize := 0 - for i := 0; i < numberOfKeys; i++ { + for i := range numberOfKeys { key := RepeatStringIfNeeded(fmt.Sprintf("key%d", i), keySize) if cache.contains(key) { totalCacheSize++ @@ -167,7 +167,7 @@ func TestFifoCacheExpire(t *testing.T) { return timeNow().Add(2 * c.cfg.Ttl) } - for i := 0; i < numberOfKeys; i++ { + for i := range numberOfKeys { key := RepeatStringIfNeeded(fmt.Sprintf("key%d", i), keySize) originalSize := cache.cachedBytes p, loaded := cache.getPromiseForKey(key, func() (int, int64, error) { @@ -213,10 +213,10 @@ func Test_memHashString(test *testing.T) { numberOfMetrics := 100 occurrences := map[uint64]int{} - for k := 0; k < 10; k++ { - for j := 0; j < numberOfMetrics; j++ { + for range 10 { + for j := range numberOfMetrics { metricName := fmt.Sprintf("metricName%v", j) - for i := 0; i < numberOfTenants; i++ { + 
for i := range numberOfTenants { userId := fmt.Sprintf("user%v", i) occurrences[memHashString(userId, metricName)]++ } diff --git a/pkg/storage/tsdb/index_cache.go b/pkg/storage/tsdb/index_cache.go index 20200a76ec6..7c1011f74a9 100644 --- a/pkg/storage/tsdb/index_cache.go +++ b/pkg/storage/tsdb/index_cache.go @@ -3,6 +3,7 @@ package tsdb import ( "flag" "fmt" + "slices" "strings" "time" @@ -14,7 +15,6 @@ import ( "github.com/thanos-io/thanos/pkg/model" storecache "github.com/thanos-io/thanos/pkg/store/cache" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" ) @@ -84,7 +84,7 @@ func (cfg *IndexCacheConfig) Validate() error { } for _, backend := range splitBackends { - if !util.StringsContain(supportedIndexCacheBackends, backend) { + if !slices.Contains(supportedIndexCacheBackends, backend) { return errUnsupportedIndexCacheBackend } @@ -249,10 +249,7 @@ func newInMemoryIndexCache(cfg InMemoryIndexCacheConfig, logger log.Logger, regi maxCacheSize := model.Bytes(cfg.MaxSizeBytes) // Calculate the max item size. - maxItemSize := defaultMaxItemSize - if maxItemSize > maxCacheSize { - maxItemSize = maxCacheSize - } + maxItemSize := min(defaultMaxItemSize, maxCacheSize) return NewInMemoryIndexCacheWithConfig(logger, nil, registerer, storecache.InMemoryIndexCacheConfig{ MaxSize: maxCacheSize, diff --git a/pkg/storage/tsdb/inmemory_index_cache_test.go b/pkg/storage/tsdb/inmemory_index_cache_test.go index 805a3cf42b0..297e249ae0e 100644 --- a/pkg/storage/tsdb/inmemory_index_cache_test.go +++ b/pkg/storage/tsdb/inmemory_index_cache_test.go @@ -24,7 +24,7 @@ import ( func TestInMemoryIndexCache_UpdateItem(t *testing.T) { var errorLogs []string - errorLogger := log.LoggerFunc(func(kvs ...interface{}) error { + errorLogger := log.LoggerFunc(func(kvs ...any) error { var lvl string for i := 0; i < len(kvs); i += 2 { if kvs[i] == "level" { @@ -135,7 +135,7 @@ func TestInMemoryIndexCacheSetOverflow(t *testing.T) { testutil.Equals(t, float64(0), prom_testutil.ToFloat64(counter)) var sb strings.Builder - for i := 0; i < 100; i++ { + for i := range 100 { sb.WriteString(strconv.Itoa(i)) } // Trigger overflow with a large value. 
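The BenchmarkInMemoryIndexCacheStore hunks below replace the b.ResetTimer() plus "for i := 0; i < b.N; i++" idiom with testing.B.Loop (added in Go 1.24). A minimal sketch of the before/after shape of such a benchmark, illustrative only and not part of the patch:

package example_test

import "testing"

// Old idiom: exclude setup from timing with ResetTimer, then iterate b.N times.
func BenchmarkSumOld(b *testing.B) {
	data := make([]int, 1024)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		total := 0
		for _, v := range data {
			total += v
		}
		_ = total
	}
}

// New idiom: b.Loop drives the iterations and times only the loop body, so the
// explicit ResetTimer call becomes redundant and is dropped alongside the rewrite.
func BenchmarkSumNew(b *testing.B) {
	data := make([]int, 1024)
	for b.Loop() {
		total := 0
		for _, v := range data {
			total += v
		}
		_ = total
	}
}
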
@@ -162,8 +162,7 @@ func BenchmarkInMemoryIndexCacheStore(b *testing.B) { cache, err := newInMemoryIndexCache(cfg, logger, prometheus.NewRegistry()) require.NoError(b, err) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { cache.StoreSeries(blockID, storage.SeriesRef(i), seriesData, tenancy.DefaultTenant) } }) @@ -172,8 +171,7 @@ func BenchmarkInMemoryIndexCacheStore(b *testing.B) { cache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, prometheus.NewRegistry(), storecache.DefaultInMemoryIndexCacheConfig) require.NoError(b, err) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { cache.StoreSeries(blockID, storage.SeriesRef(i), seriesData, tenancy.DefaultTenant) } }) @@ -182,8 +180,8 @@ func BenchmarkInMemoryIndexCacheStore(b *testing.B) { cache, err := newInMemoryIndexCache(cfg, logger, prometheus.NewRegistry()) require.NoError(b, err) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for i := 0; b.Loop(); i++ { cache.StoreSeries(blockID, storage.SeriesRef(i), postingData, tenancy.DefaultTenant) } }) @@ -192,8 +190,7 @@ func BenchmarkInMemoryIndexCacheStore(b *testing.B) { cache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, prometheus.NewRegistry(), storecache.DefaultInMemoryIndexCacheConfig) require.NoError(b, err) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { cache.StoreSeries(blockID, storage.SeriesRef(i), postingData, tenancy.DefaultTenant) } }) @@ -219,9 +216,8 @@ func BenchmarkInMemoryIndexCacheStoreConcurrent(b *testing.B) { require.NoError(b, err) ch := make(chan int) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < 500; i++ { + for range 500 { go func() { for j := range ch { cache.StoreSeries(blockID, storage.SeriesRef(j), seriesData, tenancy.DefaultTenant) @@ -230,7 +226,7 @@ func BenchmarkInMemoryIndexCacheStoreConcurrent(b *testing.B) { }() } - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { ch <- i } close(ch) @@ -241,9 +237,8 @@ func BenchmarkInMemoryIndexCacheStoreConcurrent(b *testing.B) { require.NoError(b, err) ch := make(chan int) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < 500; i++ { + for range 500 { go func() { for j := range ch { cache.StoreSeries(blockID, storage.SeriesRef(j), seriesData, tenancy.DefaultTenant) @@ -252,7 +247,7 @@ func BenchmarkInMemoryIndexCacheStoreConcurrent(b *testing.B) { }() } - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { ch <- i } close(ch) @@ -263,9 +258,8 @@ func BenchmarkInMemoryIndexCacheStoreConcurrent(b *testing.B) { require.NoError(b, err) ch := make(chan int) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < 500; i++ { + for range 500 { go func() { for j := range ch { cache.StoreSeries(blockID, storage.SeriesRef(j), postingData, tenancy.DefaultTenant) @@ -274,7 +268,7 @@ func BenchmarkInMemoryIndexCacheStoreConcurrent(b *testing.B) { }() } - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { ch <- i } close(ch) @@ -285,9 +279,8 @@ func BenchmarkInMemoryIndexCacheStoreConcurrent(b *testing.B) { require.NoError(b, err) ch := make(chan int) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < 500; i++ { + for range 500 { go func() { for j := range ch { cache.StoreSeries(blockID, storage.SeriesRef(j), postingData, tenancy.DefaultTenant) @@ -296,7 +289,7 @@ func BenchmarkInMemoryIndexCacheStoreConcurrent(b *testing.B) { }() } - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { ch <- i } close(ch) @@ -317,19 +310,18 
@@ func BenchmarkInMemoryIndexCacheFetch(b *testing.B) { ctx := context.Background() items := 10000 ids := make([]storage.SeriesRef, items) - for i := 0; i < items; i++ { + for i := range items { ids[i] = storage.SeriesRef(i) } b.Run("FastCache", func(b *testing.B) { cache, err := newInMemoryIndexCache(cfg, logger, prometheus.NewRegistry()) require.NoError(b, err) - for i := 0; i < items; i++ { + for i := range items { cache.StoreSeries(blockID, storage.SeriesRef(i), seriesData, tenancy.DefaultTenant) } b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { cache.FetchMultiSeries(ctx, blockID, ids, tenancy.DefaultTenant) } }) @@ -337,12 +329,11 @@ func BenchmarkInMemoryIndexCacheFetch(b *testing.B) { b.Run("ThanosCache", func(b *testing.B) { cache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, prometheus.NewRegistry(), storecache.DefaultInMemoryIndexCacheConfig) require.NoError(b, err) - for i := 0; i < items; i++ { + for i := range items { cache.StoreSeries(blockID, storage.SeriesRef(i), seriesData, tenancy.DefaultTenant) } b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { cache.FetchMultiSeries(ctx, blockID, ids, tenancy.DefaultTenant) } }) @@ -362,21 +353,20 @@ func BenchmarkInMemoryIndexCacheFetchConcurrent(b *testing.B) { ctx := context.Background() items := 10000 ids := make([]storage.SeriesRef, items) - for i := 0; i < items; i++ { + for i := range items { ids[i] = storage.SeriesRef(i) } b.Run("FastCache", func(b *testing.B) { cache, err := newInMemoryIndexCache(cfg, logger, prometheus.NewRegistry()) require.NoError(b, err) - for i := 0; i < items; i++ { + for i := range items { cache.StoreSeries(blockID, storage.SeriesRef(i), seriesData, tenancy.DefaultTenant) } b.ReportAllocs() - b.ResetTimer() ch := make(chan int) - for i := 0; i < 500; i++ { + for range 500 { go func() { for range ch { cache.FetchMultiSeries(ctx, blockID, ids, tenancy.DefaultTenant) @@ -384,7 +374,7 @@ func BenchmarkInMemoryIndexCacheFetchConcurrent(b *testing.B) { }() } - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { ch <- i } close(ch) @@ -393,14 +383,13 @@ func BenchmarkInMemoryIndexCacheFetchConcurrent(b *testing.B) { b.Run("ThanosCache", func(b *testing.B) { cache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, prometheus.NewRegistry(), storecache.DefaultInMemoryIndexCacheConfig) require.NoError(b, err) - for i := 0; i < items; i++ { + for i := range items { cache.StoreSeries(blockID, storage.SeriesRef(i), seriesData, tenancy.DefaultTenant) } b.ReportAllocs() - b.ResetTimer() ch := make(chan int) - for i := 0; i < 500; i++ { + for range 500 { go func() { for range ch { cache.FetchMultiSeries(ctx, blockID, ids, tenancy.DefaultTenant) @@ -408,7 +397,7 @@ func BenchmarkInMemoryIndexCacheFetchConcurrent(b *testing.B) { }() } - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { ch <- i } close(ch) diff --git a/pkg/storage/tsdb/meta_extensions_test.go b/pkg/storage/tsdb/meta_extensions_test.go index 6f296eb461c..f6108170e79 100644 --- a/pkg/storage/tsdb/meta_extensions_test.go +++ b/pkg/storage/tsdb/meta_extensions_test.go @@ -139,7 +139,7 @@ func TestGetPartitionedInfo(t *testing.T) { PartitionedGroupID uint32 `json:"partitionedGroupId"` PartitionCount int `json:"partitionCount"` PartitionID int `json:"partitionId"` - } `json:"partition_info,omitempty"` + } `json:"partition_info"` }{ PartitionInfo: struct { PartitionedGroupID uint32 `json:"partitionedGroupId"` diff --git 
a/pkg/storage/tsdb/multilevel_bucket_cache.go b/pkg/storage/tsdb/multilevel_bucket_cache.go index 83fcfddce78..f9e2b4fbfd1 100644 --- a/pkg/storage/tsdb/multilevel_bucket_cache.go +++ b/pkg/storage/tsdb/multilevel_bucket_cache.go @@ -5,6 +5,7 @@ import ( "errors" "flag" "fmt" + "maps" "time" "github.com/prometheus/client_golang/prometheus" @@ -127,9 +128,7 @@ func (m *multiLevelBucketCache) Fetch(ctx context.Context, keys []string) map[st return nil } if data := c.Fetch(ctx, missingKeys); len(data) > 0 { - for k, d := range data { - hits[k] = d - } + maps.Copy(hits, data) if i > 0 && len(hits) > 0 { // lets fetch only the mising keys @@ -142,9 +141,7 @@ func (m *multiLevelBucketCache) Fetch(ctx context.Context, keys []string) map[st missingKeys = m - for k, b := range hits { - backfillItems[i-1][k] = b - } + maps.Copy(backfillItems[i-1], hits) } if len(hits) == len(keys) { diff --git a/pkg/storage/tsdb/multilevel_index_cache.go b/pkg/storage/tsdb/multilevel_index_cache.go index f46610fc242..bab35f74710 100644 --- a/pkg/storage/tsdb/multilevel_index_cache.go +++ b/pkg/storage/tsdb/multilevel_index_cache.go @@ -3,6 +3,7 @@ package tsdb import ( "context" "errors" + "maps" "slices" "github.com/oklog/ulid/v2" @@ -54,9 +55,7 @@ func (m *multiLevelCache) FetchMultiPostings(ctx context.Context, blockID ulid.U h, mi := c.FetchMultiPostings(ctx, blockID, misses, tenant) misses = mi - for label, bytes := range h { - hits[label] = bytes - } + maps.Copy(hits, h) if i > 0 { backfillItems[i-1] = h @@ -71,8 +70,6 @@ func (m *multiLevelCache) FetchMultiPostings(ctx context.Context, blockID ulid.U backFillTimer := prometheus.NewTimer(m.backFillLatency.WithLabelValues(storecache.CacheTypePostings)) defer backFillTimer.ObserveDuration() for i, values := range backfillItems { - i := i - values := values if len(values) == 0 { continue } @@ -160,9 +157,7 @@ func (m *multiLevelCache) FetchMultiSeries(ctx context.Context, blockID ulid.ULI h, miss := c.FetchMultiSeries(ctx, blockID, misses, tenant) misses = miss - for label, bytes := range h { - hits[label] = bytes - } + maps.Copy(hits, h) if i > 0 && len(h) > 0 { backfillItems[i-1] = h @@ -177,8 +172,6 @@ func (m *multiLevelCache) FetchMultiSeries(ctx context.Context, blockID ulid.ULI backFillTimer := prometheus.NewTimer(m.backFillLatency.WithLabelValues(storecache.CacheTypeSeries)) defer backFillTimer.ObserveDuration() for i, values := range backfillItems { - i := i - values := values if len(values) == 0 { continue } diff --git a/pkg/storage/tsdb/multilevel_index_cache_test.go b/pkg/storage/tsdb/multilevel_index_cache_test.go index 4d05dfaae0e..781323b408d 100644 --- a/pkg/storage/tsdb/multilevel_index_cache_test.go +++ b/pkg/storage/tsdb/multilevel_index_cache_test.go @@ -159,18 +159,18 @@ func Test_MultiLevelCache(t *testing.T) { v2 := make([]byte, 200) testCases := map[string]struct { - m1ExpectedCalls map[string][][]interface{} - m2ExpectedCalls map[string][][]interface{} - m1MockedCalls map[string][]interface{} - m2MockedCalls map[string][]interface{} + m1ExpectedCalls map[string][][]any + m2ExpectedCalls map[string][][]any + m1MockedCalls map[string][]any + m2MockedCalls map[string][]any enabledItems [][]string call func(storecache.IndexCache) }{ "[StorePostings] Should store on all caches": { - m1ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{ "StorePostings": {{bID, l1, v}}, }, - m2ExpectedCalls: map[string][][]interface{}{ + m2ExpectedCalls: map[string][][]any{ "StorePostings": {{bID, l1, v}}, }, call: func(cache 
storecache.IndexCache) { @@ -178,8 +178,8 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[StorePostings] Should store on m2 only": { - m1ExpectedCalls: map[string][][]interface{}{}, - m2ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{}, + m2ExpectedCalls: map[string][][]any{ "StorePostings": {{bID, l1, v}}, }, enabledItems: [][]string{ @@ -191,10 +191,10 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[StoreSeries] Should store on all caches": { - m1ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{ "StoreSeries": {{bID, storage.SeriesRef(1), v}}, }, - m2ExpectedCalls: map[string][][]interface{}{ + m2ExpectedCalls: map[string][][]any{ "StoreSeries": {{bID, storage.SeriesRef(1), v}}, }, call: func(cache storecache.IndexCache) { @@ -202,8 +202,8 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[StoreSeries] Should store on m2 only": { - m1ExpectedCalls: map[string][][]interface{}{}, - m2ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{}, + m2ExpectedCalls: map[string][][]any{ "StoreSeries": {{bID, storage.SeriesRef(1), v}}, }, enabledItems: [][]string{ @@ -215,10 +215,10 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[StoreExpandedPostings] Should store on all caches": { - m1ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{ "StoreExpandedPostings": {{bID, []*labels.Matcher{matcher}, v}}, }, - m2ExpectedCalls: map[string][][]interface{}{ + m2ExpectedCalls: map[string][][]any{ "StoreExpandedPostings": {{bID, []*labels.Matcher{matcher}, v}}, }, call: func(cache storecache.IndexCache) { @@ -226,8 +226,8 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[StoreExpandedPostings] Should store on m2 only": { - m1ExpectedCalls: map[string][][]interface{}{}, - m2ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{}, + m2ExpectedCalls: map[string][][]any{ "StoreExpandedPostings": {{bID, []*labels.Matcher{matcher}, v}}, }, enabledItems: [][]string{ @@ -239,10 +239,10 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[FetchMultiPostings] Should fallback when all misses": { - m1ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{ "FetchMultiPostings": {{bID, []labels.Label{l1, l2}}}, }, - m2ExpectedCalls: map[string][][]interface{}{ + m2ExpectedCalls: map[string][][]any{ "FetchMultiPostings": {{bID, []labels.Label{l1, l2}}}, }, call: func(cache storecache.IndexCache) { @@ -250,17 +250,17 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[FetchMultiPostings] should fallback and backfill only the missing keys on l1": { - m1ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{ "FetchMultiPostings": {{bID, []labels.Label{l1, l2}}}, "StorePostings": {{bID, l2, v}}, }, - m2ExpectedCalls: map[string][][]interface{}{ + m2ExpectedCalls: map[string][][]any{ "FetchMultiPostings": {{bID, []labels.Label{l2}}}, }, - m1MockedCalls: map[string][]interface{}{ + m1MockedCalls: map[string][]any{ "FetchMultiPostings": {map[labels.Label][]byte{l1: make([]byte, 1)}, []labels.Label{l2}}, }, - m2MockedCalls: map[string][]interface{}{ + m2MockedCalls: map[string][]any{ "FetchMultiPostings": {map[labels.Label][]byte{l2: v}, []labels.Label{}}, }, call: func(cache storecache.IndexCache) { @@ -268,17 +268,17 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[FetchMultiPostings] should fallback and backfill only the missing keys on l1, multiple items": { - m1ExpectedCalls: map[string][][]interface{}{ 
+ m1ExpectedCalls: map[string][][]any{ "FetchMultiPostings": {{bID, []labels.Label{l1, l2, l3}}}, "StorePostings": {{bID, l2, v}, {bID, l3, v2}}, }, - m2ExpectedCalls: map[string][][]interface{}{ + m2ExpectedCalls: map[string][][]any{ "FetchMultiPostings": {{bID, []labels.Label{l2, l3}}}, }, - m1MockedCalls: map[string][]interface{}{ + m1MockedCalls: map[string][]any{ "FetchMultiPostings": {map[labels.Label][]byte{l1: make([]byte, 1)}, []labels.Label{l2, l3}}, }, - m2MockedCalls: map[string][]interface{}{ + m2MockedCalls: map[string][]any{ "FetchMultiPostings": {map[labels.Label][]byte{l2: v, l3: v2}, []labels.Label{}}, }, call: func(cache storecache.IndexCache) { @@ -286,12 +286,12 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[FetchMultiPostings] m1 doesn't enable postings": { - m1ExpectedCalls: map[string][][]interface{}{}, - m2ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{}, + m2ExpectedCalls: map[string][][]any{ "FetchMultiPostings": {{bID, []labels.Label{l1, l2, l3}}}, }, - m1MockedCalls: map[string][]interface{}{}, - m2MockedCalls: map[string][]interface{}{ + m1MockedCalls: map[string][]any{}, + m2MockedCalls: map[string][]any{ "FetchMultiPostings": {map[labels.Label][]byte{l1: v, l2: v, l3: v2}, []labels.Label{}}, }, enabledItems: [][]string{ @@ -303,11 +303,11 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[FetchMultiPostings] should not fallback when all hit on l1": { - m1ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{ "FetchMultiPostings": {{bID, []labels.Label{l1, l2}}}, }, - m2ExpectedCalls: map[string][][]interface{}{}, - m1MockedCalls: map[string][]interface{}{ + m2ExpectedCalls: map[string][][]any{}, + m1MockedCalls: map[string][]any{ "FetchMultiPostings": {map[labels.Label][]byte{l1: make([]byte, 1), l2: make([]byte, 1)}, []labels.Label{}}, }, call: func(cache storecache.IndexCache) { @@ -315,10 +315,10 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[FetchMultiSeries] Should fallback when all misses": { - m1ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{ "FetchMultiSeries": {{bID, []storage.SeriesRef{1, 2}}}, }, - m2ExpectedCalls: map[string][][]interface{}{ + m2ExpectedCalls: map[string][][]any{ "FetchMultiSeries": {{bID, []storage.SeriesRef{1, 2}}}, }, call: func(cache storecache.IndexCache) { @@ -326,17 +326,17 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[FetchMultiSeries] should fallback and backfill only the missing keys on l1": { - m1ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{ "FetchMultiSeries": {{bID, []storage.SeriesRef{1, 2}}}, "StoreSeries": {{bID, storage.SeriesRef(2), v}}, }, - m2ExpectedCalls: map[string][][]interface{}{ + m2ExpectedCalls: map[string][][]any{ "FetchMultiSeries": {{bID, []storage.SeriesRef{2}}}, }, - m1MockedCalls: map[string][]interface{}{ + m1MockedCalls: map[string][]any{ "FetchMultiSeries": {map[storage.SeriesRef][]byte{1: v}, []storage.SeriesRef{2}}, }, - m2MockedCalls: map[string][]interface{}{ + m2MockedCalls: map[string][]any{ "FetchMultiSeries": {map[storage.SeriesRef][]byte{2: v}, []storage.SeriesRef{}}, }, call: func(cache storecache.IndexCache) { @@ -344,20 +344,20 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[FetchMultiSeries] should fallback and backfill only the missing keys on l1, multiple items": { - m1ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{ "FetchMultiSeries": {{bID, []storage.SeriesRef{1, 2, 3}}}, "StoreSeries": { 
{bID, storage.SeriesRef(2), v}, {bID, storage.SeriesRef(3), v2}, }, }, - m2ExpectedCalls: map[string][][]interface{}{ + m2ExpectedCalls: map[string][][]any{ "FetchMultiSeries": {{bID, []storage.SeriesRef{2, 3}}}, }, - m1MockedCalls: map[string][]interface{}{ + m1MockedCalls: map[string][]any{ "FetchMultiSeries": {map[storage.SeriesRef][]byte{1: v}, []storage.SeriesRef{2, 3}}, }, - m2MockedCalls: map[string][]interface{}{ + m2MockedCalls: map[string][]any{ "FetchMultiSeries": {map[storage.SeriesRef][]byte{2: v, 3: v2}, []storage.SeriesRef{}}, }, call: func(cache storecache.IndexCache) { @@ -365,12 +365,12 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[FetchMultiPostings] m1 doesn't enable series": { - m1ExpectedCalls: map[string][][]interface{}{}, - m2ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{}, + m2ExpectedCalls: map[string][][]any{ "FetchMultiSeries": {{bID, []storage.SeriesRef{1, 2, 3}}}, }, - m1MockedCalls: map[string][]interface{}{}, - m2MockedCalls: map[string][]interface{}{ + m1MockedCalls: map[string][]any{}, + m2MockedCalls: map[string][]any{ "FetchMultiSeries": {map[storage.SeriesRef][]byte{1: v, 2: v, 3: v2}, []storage.SeriesRef{}}, }, enabledItems: [][]string{ @@ -382,11 +382,11 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[FetchMultiSeries] should not fallback when all hit on l1": { - m1ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{ "FetchMultiSeries": {{bID, []storage.SeriesRef{1, 2}}}, }, - m2ExpectedCalls: map[string][][]interface{}{}, - m1MockedCalls: map[string][]interface{}{ + m2ExpectedCalls: map[string][][]any{}, + m1MockedCalls: map[string][]any{ "FetchMultiSeries": {map[storage.SeriesRef][]byte{1: make([]byte, 1), 2: make([]byte, 1)}, []storage.SeriesRef{}}, }, call: func(cache storecache.IndexCache) { @@ -394,14 +394,14 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[FetchExpandedPostings] Should fallback and backfill when miss": { - m1ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{ "StoreExpandedPostings": {{bID, []*labels.Matcher{matcher}, v}}, "FetchExpandedPostings": {{bID, []*labels.Matcher{matcher}}}, }, - m2ExpectedCalls: map[string][][]interface{}{ + m2ExpectedCalls: map[string][][]any{ "FetchExpandedPostings": {{bID, []*labels.Matcher{matcher}}}, }, - m2MockedCalls: map[string][]interface{}{ + m2MockedCalls: map[string][]any{ "FetchExpandedPostings": {v, true}, }, call: func(cache storecache.IndexCache) { @@ -409,11 +409,11 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[FetchExpandedPostings] should not fallback when all hit on l1": { - m1ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{ "FetchExpandedPostings": {{bID, []*labels.Matcher{matcher}}}, }, - m2ExpectedCalls: map[string][][]interface{}{}, - m1MockedCalls: map[string][]interface{}{ + m2ExpectedCalls: map[string][][]any{}, + m1MockedCalls: map[string][]any{ "FetchExpandedPostings": {[]byte{}, true}, }, call: func(cache storecache.IndexCache) { @@ -421,12 +421,12 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[FetchExpandedPostings] m1 doesn't enable expanded postings": { - m1ExpectedCalls: map[string][][]interface{}{}, - m2ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{}, + m2ExpectedCalls: map[string][][]any{ "FetchExpandedPostings": {{bID, []*labels.Matcher{matcher}}}, }, - m1MockedCalls: map[string][]interface{}{}, - m2MockedCalls: map[string][]interface{}{ + m1MockedCalls: map[string][]any{}, + 
m2MockedCalls: map[string][]any{ "FetchExpandedPostings": {[]byte{}, true}, }, enabledItems: [][]string{ @@ -475,29 +475,29 @@ func Test_MultiLevelCache(t *testing.T) { } } -func newMockIndexCache(mockedCalls map[string][]interface{}) *mockIndexCache { +func newMockIndexCache(mockedCalls map[string][]any) *mockIndexCache { return &mockIndexCache{ - calls: map[string][][]interface{}{}, + calls: map[string][][]any{}, mockedCalls: mockedCalls, } } type mockIndexCache struct { mtx sync.Mutex - calls map[string][][]interface{} - mockedCalls map[string][]interface{} + calls map[string][][]any + mockedCalls map[string][]any } func (m *mockIndexCache) StorePostings(blockID ulid.ULID, l labels.Label, v []byte, tenant string) { m.mtx.Lock() defer m.mtx.Unlock() - m.calls["StorePostings"] = append(m.calls["StorePostings"], []interface{}{blockID, l, v}) + m.calls["StorePostings"] = append(m.calls["StorePostings"], []any{blockID, l, v}) } func (m *mockIndexCache) FetchMultiPostings(_ context.Context, blockID ulid.ULID, keys []labels.Label, tenant string) (hits map[labels.Label][]byte, misses []labels.Label) { m.mtx.Lock() defer m.mtx.Unlock() - m.calls["FetchMultiPostings"] = append(m.calls["FetchMultiPostings"], []interface{}{blockID, keys}) + m.calls["FetchMultiPostings"] = append(m.calls["FetchMultiPostings"], []any{blockID, keys}) if m, ok := m.mockedCalls["FetchMultiPostings"]; ok { return m[0].(map[labels.Label][]byte), m[1].([]labels.Label) } @@ -508,13 +508,13 @@ func (m *mockIndexCache) FetchMultiPostings(_ context.Context, blockID ulid.ULID func (m *mockIndexCache) StoreExpandedPostings(blockID ulid.ULID, matchers []*labels.Matcher, v []byte, tenant string) { m.mtx.Lock() defer m.mtx.Unlock() - m.calls["StoreExpandedPostings"] = append(m.calls["StoreExpandedPostings"], []interface{}{blockID, matchers, v}) + m.calls["StoreExpandedPostings"] = append(m.calls["StoreExpandedPostings"], []any{blockID, matchers, v}) } func (m *mockIndexCache) FetchExpandedPostings(_ context.Context, blockID ulid.ULID, matchers []*labels.Matcher, tenant string) ([]byte, bool) { m.mtx.Lock() defer m.mtx.Unlock() - m.calls["FetchExpandedPostings"] = append(m.calls["FetchExpandedPostings"], []interface{}{blockID, matchers}) + m.calls["FetchExpandedPostings"] = append(m.calls["FetchExpandedPostings"], []any{blockID, matchers}) if m, ok := m.mockedCalls["FetchExpandedPostings"]; ok { return m[0].([]byte), m[1].(bool) } @@ -525,13 +525,13 @@ func (m *mockIndexCache) FetchExpandedPostings(_ context.Context, blockID ulid.U func (m *mockIndexCache) StoreSeries(blockID ulid.ULID, id storage.SeriesRef, v []byte, tenant string) { m.mtx.Lock() defer m.mtx.Unlock() - m.calls["StoreSeries"] = append(m.calls["StoreSeries"], []interface{}{blockID, id, v}) + m.calls["StoreSeries"] = append(m.calls["StoreSeries"], []any{blockID, id, v}) } func (m *mockIndexCache) FetchMultiSeries(_ context.Context, blockID ulid.ULID, ids []storage.SeriesRef, tenant string) (hits map[storage.SeriesRef][]byte, misses []storage.SeriesRef) { m.mtx.Lock() defer m.mtx.Unlock() - m.calls["FetchMultiSeries"] = append(m.calls["FetchMultiSeries"], []interface{}{blockID, ids}) + m.calls["FetchMultiSeries"] = append(m.calls["FetchMultiSeries"], []any{blockID, ids}) if m, ok := m.mockedCalls["FetchMultiSeries"]; ok { return m[0].(map[storage.SeriesRef][]byte), m[1].([]storage.SeriesRef) } diff --git a/pkg/storage/tsdb/testutil/objstore.go b/pkg/storage/tsdb/testutil/objstore.go index c2ad987f5c7..0892d19b6fe 100644 --- a/pkg/storage/tsdb/testutil/objstore.go +++ 
b/pkg/storage/tsdb/testutil/objstore.go @@ -4,6 +4,7 @@ import ( "context" "io" "os" + "slices" "strings" "testing" @@ -12,8 +13,6 @@ import ( "github.com/thanos-io/objstore" "go.uber.org/atomic" - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/storage/bucket/filesystem" ) @@ -45,7 +44,7 @@ type MockBucketFailure struct { } func (m *MockBucketFailure) Delete(ctx context.Context, name string) error { - if util.StringsContain(m.DeleteFailures, name) { + if slices.Contains(m.DeleteFailures, name) { return errors.New("mocked delete failure") } return m.Bucket.Delete(ctx, name) diff --git a/pkg/storage/tsdb/users/cache_test.go b/pkg/storage/tsdb/users/cache_test.go index 9b1f0d7d426..6ef5588c922 100644 --- a/pkg/storage/tsdb/users/cache_test.go +++ b/pkg/storage/tsdb/users/cache_test.go @@ -69,7 +69,6 @@ func TestCachedScanner_ScanUsers(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() @@ -132,7 +131,7 @@ func TestCachedScanner_ConcurrentAccess(t *testing.T) { const goroutines = 10 done := make(chan struct{}) - for i := 0; i < goroutines; i++ { + for range goroutines { go func() { defer func() { done <- struct{}{} }() @@ -145,7 +144,7 @@ func TestCachedScanner_ConcurrentAccess(t *testing.T) { } // Wait for all goroutines to complete - for i := 0; i < goroutines; i++ { + for range goroutines { <-done } diff --git a/pkg/storage/tsdb/users/scanner_test.go b/pkg/storage/tsdb/users/scanner_test.go index 433f85d3aec..6f906237346 100644 --- a/pkg/storage/tsdb/users/scanner_test.go +++ b/pkg/storage/tsdb/users/scanner_test.go @@ -74,7 +74,6 @@ func TestListScanner_ScanUsers(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() @@ -187,7 +186,6 @@ func TestUserIndexScanner_ScanUsers(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() @@ -268,7 +266,6 @@ func TestShardedScanner_ScanUsers(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() diff --git a/pkg/storage/tsdb/users/updater_test.go b/pkg/storage/tsdb/users/updater_test.go index 1828597b05c..c5273c8e039 100644 --- a/pkg/storage/tsdb/users/updater_test.go +++ b/pkg/storage/tsdb/users/updater_test.go @@ -73,7 +73,6 @@ func TestUserIndexUpdater_UpdateUserIndex(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() bkt, _ := cortex_testutil.PrepareFilesystemBucket(t) diff --git a/pkg/storage/tsdb/users_scanner_config_test.go b/pkg/storage/tsdb/users_scanner_config_test.go index 2abe0451c4a..9e6d20a37c7 100644 --- a/pkg/storage/tsdb/users_scanner_config_test.go +++ b/pkg/storage/tsdb/users_scanner_config_test.go @@ -97,7 +97,6 @@ func TestUsersScannerConfig_Validate(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() diff --git a/pkg/storegateway/bucket_store_metrics_test.go b/pkg/storegateway/bucket_store_metrics_test.go index ac4ff00df87..2b087b89b36 100644 --- a/pkg/storegateway/bucket_store_metrics_test.go +++ b/pkg/storegateway/bucket_store_metrics_test.go @@ -631,12 +631,11 @@ func benchmarkMetricsCollection(b *testing.B, users int) { mainReg.MustRegister(tsdbMetrics) base := 123456.0 - for i := 0; i < users; i++ { + for i := range users { 
tsdbMetrics.AddUserRegistry(fmt.Sprintf("user-%d", i), populateMockedBucketStoreMetrics(base*float64(i))) } - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { _, _ = mainReg.Gather() } } diff --git a/pkg/storegateway/bucket_stores_test.go b/pkg/storegateway/bucket_stores_test.go index 674a2bae27b..831b7afb2b4 100644 --- a/pkg/storegateway/bucket_stores_test.go +++ b/pkg/storegateway/bucket_stores_test.go @@ -10,6 +10,7 @@ import ( "os" "path" "path/filepath" + "slices" "sort" "strings" "testing" @@ -470,7 +471,6 @@ func TestBucketStores_scanUsers(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() @@ -997,7 +997,7 @@ func (u *userShardingStrategy) FilterUsers(ctx context.Context, userIDs []string } func (u *userShardingStrategy) FilterBlocks(ctx context.Context, userID string, metas map[ulid.ULID]*thanos_metadata.Meta, loaded map[ulid.ULID]struct{}, synced block.GaugeVec) error { - if util.StringsContain(u.users, userID) { + if slices.Contains(u.users, userID) { return nil } @@ -1008,7 +1008,7 @@ func (u *userShardingStrategy) FilterBlocks(ctx context.Context, userID string, } func (u *userShardingStrategy) OwnBlock(userID string, _ thanos_metadata.Meta) (bool, error) { - if util.StringsContain(u.users, userID) { + if slices.Contains(u.users, userID) { return true, nil } diff --git a/pkg/storegateway/gateway.go b/pkg/storegateway/gateway.go index 9e61d63abf2..9608a5257c8 100644 --- a/pkg/storegateway/gateway.go +++ b/pkg/storegateway/gateway.go @@ -5,6 +5,7 @@ import ( "flag" "fmt" "net/http" + "slices" "strings" "time" @@ -89,7 +90,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { // Validate the Config. func (cfg *Config) Validate(limits validation.Limits, monitoredResources flagext.StringSliceCSV) error { if cfg.ShardingEnabled { - if !util.StringsContain(supportedShardingStrategies, cfg.ShardingStrategy) { + if !slices.Contains(supportedShardingStrategies, cfg.ShardingStrategy) { return errInvalidShardingStrategy } diff --git a/pkg/storegateway/gateway_ring_test.go b/pkg/storegateway/gateway_ring_test.go index c00f227f8ad..6142fd131a1 100644 --- a/pkg/storegateway/gateway_ring_test.go +++ b/pkg/storegateway/gateway_ring_test.go @@ -57,7 +57,6 @@ func TestIsHealthyForStoreGatewayOperations(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() diff --git a/pkg/storegateway/gateway_test.go b/pkg/storegateway/gateway_test.go index b9070c236e7..29088911f86 100644 --- a/pkg/storegateway/gateway_test.go +++ b/pkg/storegateway/gateway_test.go @@ -84,7 +84,6 @@ func TestConfig_Validate(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() cfg := &Config{} @@ -131,7 +130,6 @@ func TestStoreGateway_InitialSyncWithDefaultShardingEnabled(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() ctx := context.Background() @@ -146,7 +144,7 @@ func TestStoreGateway_InitialSyncWithDefaultShardingEnabled(t *testing.T) { // Setup the initial instance state in the ring. 
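The store-gateway tests below, like most files touched by this patch, rewrite interface{} to any in callbacks such as ringStore.CAS and test.Poll. Since Go 1.18, any is a built-in alias for interface{}, so the rewrite changes no behavior; a throwaway sketch with hypothetical names:

package main

import "fmt"

// "any" and "interface{}" are the same type, so either spelling of a variadic
// or callback parameter accepts exactly the same arguments.
func logOld(keyvals ...interface{}) { fmt.Println(keyvals...) }
func logNew(keyvals ...any)         { fmt.Println(keyvals...) }

func main() {
	var v any = 42
	var w interface{} = v // assignable in both directions: identical types
	logOld("value", v, w)
	logNew("value", v, w)
}
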
if testData.initialExists { - require.NoError(t, ringStore.CAS(ctx, RingKey, func(in interface{}) (interface{}, bool, error) { + require.NoError(t, ringStore.CAS(ctx, RingKey, func(in any) (any, bool, error) { ringDesc := ring.GetOrCreateRingDesc(in) ringDesc.AddIngester(gatewayCfg.ShardingRing.InstanceID, gatewayCfg.ShardingRing.InstanceAddr, "", testData.initialTokens, testData.initialState, time.Now()) return ringDesc, true, nil @@ -532,7 +530,7 @@ func TestStoreGateway_BlocksSyncWithDefaultSharding_RingTopologyChangedAfterScal // store-gateways behaves with regards to blocks syncing while other replicas are JOINING. // Wait until all the initial store-gateways sees all new store-gateways too. - test.Poll(t, 5*time.Second, float64(numAllGateways*numInitialGateways), func() interface{} { + test.Poll(t, 5*time.Second, float64(numAllGateways*numInitialGateways), func() any { metrics := initialRegistries.BuildMetricFamiliesPerUser() return metrics.GetSumOfGauges("cortex_ring_members") }) @@ -568,7 +566,7 @@ func TestStoreGateway_BlocksSyncWithDefaultSharding_RingTopologyChangedAfterScal // At this point the new store-gateways are expected to be ACTIVE in the ring and all the initial // store-gateways should unload blocks they don't own anymore. - test.Poll(t, 5*time.Second, float64(expectedBlocksLoaded), func() interface{} { + test.Poll(t, 5*time.Second, float64(expectedBlocksLoaded), func() any { metrics := allRegistries.BuildMetricFamiliesPerUser() return metrics.GetSumOfGauges("cortex_bucket_store_blocks_loaded") }) @@ -596,7 +594,6 @@ func TestStoreGateway_ShouldSupportLoadRingTokensFromFile(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() tokensFile, err := os.CreateTemp(os.TempDir(), "tokens-*") @@ -812,7 +809,6 @@ func TestStoreGateway_SyncOnRingTopologyChanged(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() ctx := context.Background() @@ -835,7 +831,7 @@ func TestStoreGateway_SyncOnRingTopologyChanged(t *testing.T) { require.NoError(t, err) // Store the initial ring state before starting the gateway. - require.NoError(t, ringStore.CAS(ctx, RingKey, func(in interface{}) (interface{}, bool, error) { + require.NoError(t, ringStore.CAS(ctx, RingKey, func(in any) (any, bool, error) { ringDesc := ring.GetOrCreateRingDesc(in) testData.setupRing(ringDesc) return ringDesc, true, nil @@ -851,7 +847,7 @@ func TestStoreGateway_SyncOnRingTopologyChanged(t *testing.T) { assert.Equal(t, float64(1), metrics.GetSumOfCounters("cortex_storegateway_bucket_sync_total")) // Change the ring topology. - require.NoError(t, ringStore.CAS(ctx, RingKey, func(in interface{}) (interface{}, bool, error) { + require.NoError(t, ringStore.CAS(ctx, RingKey, func(in any) (any, bool, error) { ringDesc := ring.GetOrCreateRingDesc(in) testData.updateRing(ringDesc) return ringDesc, true, nil @@ -859,7 +855,7 @@ func TestStoreGateway_SyncOnRingTopologyChanged(t *testing.T) { // Assert whether the sync triggered or not. 
if testData.expectedSync { - test.Poll(t, time.Second, float64(2), func() interface{} { + test.Poll(t, time.Second, float64(2), func() any { metrics := regs.BuildMetricFamiliesPerUser() return metrics.GetSumOfCounters("cortex_storegateway_bucket_sync_total") }) @@ -900,7 +896,7 @@ func TestStoreGateway_RingLifecyclerShouldAutoForgetUnhealthyInstances(t *testin defer services.StopAndAwaitTerminated(ctx, g) //nolint:errcheck // Add an unhealthy instance to the ring. - require.NoError(t, ringStore.CAS(ctx, RingKey, func(in interface{}) (interface{}, bool, error) { + require.NoError(t, ringStore.CAS(ctx, RingKey, func(in any) (any, bool, error) { ringDesc := ring.GetOrCreateRingDesc(in) tg := ring.NewRandomTokenGenerator() instance := ringDesc.AddIngester(unhealthyInstanceID, "1.1.1.1", "", tg.GenerateTokens(ringDesc, unhealthyInstanceID, "", RingNumTokens, true), ring.ACTIVE, time.Now()) @@ -911,7 +907,7 @@ func TestStoreGateway_RingLifecyclerShouldAutoForgetUnhealthyInstances(t *testin })) // Ensure the unhealthy instance is removed from the ring. - test.Poll(t, time.Second, false, func() interface{} { + test.Poll(t, time.Second, false, func() any { d, err := ringStore.Get(ctx, RingKey) if err != nil { return err @@ -969,7 +965,6 @@ func TestStoreGateway_SeriesQueryingShouldRemoveExternalLabels(t *testing.T) { } for _, bucketIndexEnabled := range []bool{true, false} { - bucketIndexEnabled := bucketIndexEnabled t.Run(fmt.Sprintf("bucket index enabled = %v", bucketIndexEnabled), func(t *testing.T) { t.Parallel() // Create a store-gateway used to query back the series from the blocks. @@ -998,7 +993,7 @@ func TestStoreGateway_SeriesQueryingShouldRemoveExternalLabels(t *testing.T) { assert.Empty(t, srv.Warnings) assert.Len(t, srv.SeriesSet, numSeries) - for seriesID := 0; seriesID < numSeries; seriesID++ { + for seriesID := range numSeries { actual := srv.SeriesSet[seriesID] // Ensure Cortex external labels have been removed. @@ -1315,7 +1310,7 @@ func mockTSDB(t *testing.T, dir string, numSeries, numBlocks int, minT, maxT int i++ } } else { - for i := 0; i < numSeries; i++ { + for i := range numSeries { addSample(i) } } diff --git a/pkg/storegateway/partitioner.go b/pkg/storegateway/partitioner.go index 816a45d8a54..a7b6477ec1e 100644 --- a/pkg/storegateway/partitioner.go +++ b/pkg/storegateway/partitioner.go @@ -41,7 +41,7 @@ func newGapBasedPartitioner(maxGapBytes uint64, reg prometheus.Registerer) *gapB func (p *gapBasedPartitioner) Partition(length int, rng func(int) (uint64, uint64)) []store.Part { // Calculate the size of requested ranges. requestedBytes := uint64(0) - for i := 0; i < length; i++ { + for i := range length { start, end := rng(i) requestedBytes += end - start } diff --git a/pkg/storegateway/sharding_strategy_test.go b/pkg/storegateway/sharding_strategy_test.go index 4f9cfe10f10..2f2e002e09c 100644 --- a/pkg/storegateway/sharding_strategy_test.go +++ b/pkg/storegateway/sharding_strategy_test.go @@ -242,8 +242,6 @@ func TestDefaultShardingStrategy(t *testing.T) { } for testName, testData := range tests { - testName := testName - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() @@ -253,7 +251,7 @@ func TestDefaultShardingStrategy(t *testing.T) { t.Cleanup(func() { assert.NoError(t, closer.Close()) }) // Initialize the ring state. 
- require.NoError(t, store.CAS(ctx, "test", func(in interface{}) (interface{}, bool, error) { + require.NoError(t, store.CAS(ctx, "test", func(in any) (any, bool, error) { d := ring.NewDesc() testData.setupRing(d) return d, true, nil @@ -620,9 +618,6 @@ func TestShuffleShardingStrategy(t *testing.T) { for testName, testData := range tests { for _, zoneStableShuffleSharding := range []bool{false, true} { - testName := testName - testData := testData - t.Run(fmt.Sprintf("%s %s", testName, strconv.FormatBool(zoneStableShuffleSharding)), func(t *testing.T) { t.Parallel() @@ -631,7 +626,7 @@ func TestShuffleShardingStrategy(t *testing.T) { t.Cleanup(func() { assert.NoError(t, closer.Close()) }) // Initialize the ring state. - require.NoError(t, store.CAS(ctx, "test", func(in interface{}) (interface{}, bool, error) { + require.NoError(t, store.CAS(ctx, "test", func(in any) (any, bool, error) { d := ring.NewDesc() testData.setupRing(d) return d, true, nil @@ -722,7 +717,7 @@ func TestDefaultShardingStrategy_OwnBlock(t *testing.T) { t.Cleanup(func() { assert.NoError(t, closer.Close()) }) // Initialize the ring state. - require.NoError(t, store.CAS(ctx, "test", func(in interface{}) (interface{}, bool, error) { + require.NoError(t, store.CAS(ctx, "test", func(in any) (any, bool, error) { d := ring.NewDesc() d.AddIngester("instance-1", "127.0.0.1", "zone-a", []uint32{block1Hash + 1}, ring.ACTIVE, registeredAt) d.AddIngester("instance-2", "127.0.0.2", "zone-b", []uint32{block2Hash + 1}, ring.ACTIVE, registeredAt) @@ -772,7 +767,7 @@ func TestShuffleShardingStrategy_OwnBlock(t *testing.T) { t.Cleanup(func() { assert.NoError(t, closer.Close()) }) // Initialize the ring state. - require.NoError(t, store.CAS(ctx, "test", func(in interface{}) (interface{}, bool, error) { + require.NoError(t, store.CAS(ctx, "test", func(in any) (any, bool, error) { d := ring.NewDesc() d.AddIngester("instance-1", "127.0.0.1", "zone-a", []uint32{block1Hash + 1}, ring.ACTIVE, registeredAt) d.AddIngester("instance-2", "127.0.0.2", "zone-b", []uint32{block2Hash + 1}, ring.ACTIVE, registeredAt) diff --git a/pkg/tracing/migration/bridge_wrapper.go b/pkg/tracing/migration/bridge_wrapper.go index 5a17cc7fec2..96cc5abc4d9 100644 --- a/pkg/tracing/migration/bridge_wrapper.go +++ b/pkg/tracing/migration/bridge_wrapper.go @@ -20,7 +20,7 @@ func (b *CortexBridgeTracerWrapper) StartSpan(operationName string, opts ...open return b.bt.StartSpan(operationName, opts...) 
} -func (b *CortexBridgeTracerWrapper) Inject(sm opentracing.SpanContext, format interface{}, carrier interface{}) error { +func (b *CortexBridgeTracerWrapper) Inject(sm opentracing.SpanContext, format any, carrier any) error { builtinFormat, ok := format.(opentracing.BuiltinFormat) if !ok { @@ -57,7 +57,7 @@ func (b *CortexBridgeTracerWrapper) Inject(sm opentracing.SpanContext, format in } } -func (b *CortexBridgeTracerWrapper) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) { +func (b *CortexBridgeTracerWrapper) Extract(format any, carrier any) (opentracing.SpanContext, error) { builtinFormat, ok := format.(opentracing.BuiltinFormat) if !ok { diff --git a/pkg/tracing/migration/bridge_wrapper_test.go b/pkg/tracing/migration/bridge_wrapper_test.go index ea3375958cb..54eb9cdf904 100644 --- a/pkg/tracing/migration/bridge_wrapper_test.go +++ b/pkg/tracing/migration/bridge_wrapper_test.go @@ -75,8 +75,8 @@ func (p *mockPropagator) Extract(ctx context.Context, carrier propagation.TextMa func TestCortexBridgeTracerWrapper_Inject(t *testing.T) { tests := []struct { name string - format interface{} - carrier interface{} + format any + carrier any wantedValues map[string]string }{ { diff --git a/pkg/util/active_user_test.go b/pkg/util/active_user_test.go index 4db9e7b0cdc..60e97f6dbc8 100644 --- a/pkg/util/active_user_test.go +++ b/pkg/util/active_user_test.go @@ -61,7 +61,7 @@ func TestActiveUserConcurrentUpdateAndPurge(t *testing.T) { latestTS := atomic.NewInt64(0) - for j := 0; j < count; j++ { + for range count { done.Add(1) go func() { @@ -79,7 +79,7 @@ func TestActiveUserConcurrentUpdateAndPurge(t *testing.T) { } previousLatest := int64(0) - for i := 0; i < 10; i++ { + for range 10 { time.Sleep(100 * time.Millisecond) latest := latestTS.Load() @@ -110,7 +110,7 @@ func BenchmarkActiveUsers_UpdateUserTimestamp(b *testing.B) { startGoroutinesDoingUpdates(b, c, as) - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { as.UpdateUserTimestamp("test", int64(i)) } }) @@ -124,7 +124,7 @@ func BenchmarkActiveUsers_Purge(b *testing.B) { startGoroutinesDoingUpdates(b, c, as) - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { as.PurgeInactiveUsers(int64(i)) } }) @@ -136,7 +136,7 @@ func startGoroutinesDoingUpdates(b *testing.B, count int, as *ActiveUsers) { stop := atomic.NewBool(false) started := sync.WaitGroup{} - for j := 0; j < count; j++ { + for j := range count { done.Add(1) started.Add(1) userID := fmt.Sprintf("user-%d", j) diff --git a/pkg/util/api/response.go b/pkg/util/api/response.go index c58baf60b95..74b40742128 100644 --- a/pkg/util/api/response.go +++ b/pkg/util/api/response.go @@ -19,7 +19,7 @@ const ( // Response defines the Prometheus response format. 
type Response struct { Status string `json:"status"` - Data interface{} `json:"data,omitempty"` + Data any `json:"data,omitempty"` ErrorType v1.ErrorType `json:"errorType,omitempty"` Error string `json:"error,omitempty"` Warnings []string `json:"warnings,omitempty"` diff --git a/pkg/util/backoff/backoff_test.go b/pkg/util/backoff/backoff_test.go index dff6432c06b..942cebb6a40 100644 --- a/pkg/util/backoff/backoff_test.go +++ b/pkg/util/backoff/backoff_test.go @@ -80,7 +80,6 @@ func TestBackoff_NextDelay(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() diff --git a/pkg/util/concurrency/runner.go b/pkg/util/concurrency/runner.go index 8f6d180c779..df9b5e37a18 100644 --- a/pkg/util/concurrency/runner.go +++ b/pkg/util/concurrency/runner.go @@ -30,7 +30,7 @@ func ForEachUser(ctx context.Context, userIDs []string, concurrency int, userFun wg := sync.WaitGroup{} routines := min(concurrency, len(userIDs)) - for ix := 0; ix < routines; ix++ { + for range routines { wg.Add(1) go func() { defer wg.Done() @@ -62,13 +62,13 @@ func ForEachUser(ctx context.Context, userIDs []string, concurrency int, userFun // ForEach runs the provided jobFunc for each job up to concurrency concurrent workers. // The execution breaks on first error encountered. -func ForEach(ctx context.Context, jobs []interface{}, concurrency int, jobFunc func(ctx context.Context, job interface{}) error) error { +func ForEach(ctx context.Context, jobs []any, concurrency int, jobFunc func(ctx context.Context, job any) error) error { if len(jobs) == 0 { return nil } // Push all jobs to a channel. - ch := make(chan interface{}, len(jobs)) + ch := make(chan any, len(jobs)) for _, job := range jobs { ch <- job } @@ -77,7 +77,7 @@ func ForEach(ctx context.Context, jobs []interface{}, concurrency int, jobFunc f // Start workers to process jobs. g, ctx := errgroup.WithContext(ctx) routines := min(concurrency, len(jobs)) - for ix := 0; ix < routines; ix++ { + for range routines { g.Go(func() error { for job := range ch { if err := ctx.Err(); err != nil { @@ -98,9 +98,9 @@ func ForEach(ctx context.Context, jobs []interface{}, concurrency int, jobFunc f } // CreateJobsFromStrings is a utility to create jobs from an slice of strings. 
-func CreateJobsFromStrings(values []string) []interface{} { - jobs := make([]interface{}, len(values)) - for i := 0; i < len(values); i++ { +func CreateJobsFromStrings(values []string) []any { + jobs := make([]any, len(values)) + for i := range values { jobs[i] = values[i] } return jobs diff --git a/pkg/util/concurrency/runner_test.go b/pkg/util/concurrency/runner_test.go index 54b171d5b1a..75439268a24 100644 --- a/pkg/util/concurrency/runner_test.go +++ b/pkg/util/concurrency/runner_test.go @@ -83,7 +83,7 @@ func TestForEach(t *testing.T) { jobs := []string{"a", "b", "c"} - err := ForEach(ctx, CreateJobsFromStrings(jobs), 2, func(ctx context.Context, job interface{}) error { + err := ForEach(ctx, CreateJobsFromStrings(jobs), 2, func(ctx context.Context, job any) error { processedMx.Lock() defer processedMx.Unlock() processed = append(processed, job.(string)) @@ -102,7 +102,7 @@ func TestForEach_ShouldBreakOnFirstError_ContextCancellationHandled(t *testing.T processed atomic.Int32 ) - err := ForEach(ctx, []interface{}{"a", "b", "c"}, 2, func(ctx context.Context, job interface{}) error { + err := ForEach(ctx, []any{"a", "b", "c"}, 2, func(ctx context.Context, job any) error { if processed.CompareAndSwap(0, 1) { return errors.New("the first request is failing") } @@ -137,7 +137,7 @@ func TestForEach_ShouldBreakOnFirstError_ContextCancellationUnhandled(t *testing var wg sync.WaitGroup wg.Add(2) - err := ForEach(ctx, []interface{}{"a", "b", "c"}, 2, func(ctx context.Context, job interface{}) error { + err := ForEach(ctx, []any{"a", "b", "c"}, 2, func(ctx context.Context, job any) error { wg.Done() if processed.CompareAndSwap(0, 1) { @@ -162,7 +162,7 @@ func TestForEach_ShouldBreakOnFirstError_ContextCancellationUnhandled(t *testing } func TestForEach_ShouldReturnImmediatelyOnNoJobsProvided(t *testing.T) { - require.NoError(t, ForEach(context.Background(), nil, 2, func(ctx context.Context, job interface{}) error { + require.NoError(t, ForEach(context.Background(), nil, 2, func(ctx context.Context, job any) error { return nil })) } diff --git a/pkg/util/config.go b/pkg/util/config.go index e1032d0f6f1..9bf1c7184f6 100644 --- a/pkg/util/config.go +++ b/pkg/util/config.go @@ -6,8 +6,8 @@ import ( ) // DiffConfig utility function that returns the diff between two config map objects -func DiffConfig(defaultConfig, actualConfig map[interface{}]interface{}) (map[interface{}]interface{}, error) { - output := make(map[interface{}]interface{}) +func DiffConfig(defaultConfig, actualConfig map[any]any) (map[any]any, error) { + output := make(map[any]any) for key, value := range actualConfig { @@ -33,8 +33,8 @@ func DiffConfig(defaultConfig, actualConfig map[interface{}]interface{}) (map[in if !ok || defaultV != v { output[key] = v } - case []interface{}: - defaultV, ok := defaultValue.([]interface{}) + case []any: + defaultV, ok := defaultValue.([]any) if !ok || !reflect.DeepEqual(defaultV, v) { output[key] = v } @@ -47,8 +47,8 @@ func DiffConfig(defaultConfig, actualConfig map[interface{}]interface{}) (map[in if defaultValue != nil { output[key] = v } - case map[interface{}]interface{}: - defaultV, ok := defaultValue.(map[interface{}]interface{}) + case map[any]any: + defaultV, ok := defaultValue.(map[any]any) if !ok { output[key] = value } diff --git a/pkg/util/events.go b/pkg/util/events.go index 312f4371486..07453ad19bc 100644 --- a/pkg/util/events.go +++ b/pkg/util/events.go @@ -13,8 +13,8 @@ import ( var ( // interface{} vars to avoid allocation on every call - key interface{} = "level" // 
masquerade as a level like debug, warn - event interface{} = "event" + key any = "level" // masquerade as a level like debug, warn + event any = "event" eventLogger = log.NewNopLogger() ) @@ -46,7 +46,7 @@ type samplingFilter struct { count atomic.Int64 } -func (e *samplingFilter) Log(keyvals ...interface{}) error { +func (e *samplingFilter) Log(keyvals ...any) error { count := e.count.Inc() if count%int64(e.freq) == 0 { return e.next.Log(keyvals...) diff --git a/pkg/util/fakeauth/fake_auth.go b/pkg/util/fakeauth/fake_auth.go index 92207983dce..c4f538a583e 100644 --- a/pkg/util/fakeauth/fake_auth.go +++ b/pkg/util/fakeauth/fake_auth.go @@ -20,7 +20,7 @@ func SetupAuthMiddleware(config *server.Config, enabled bool, noGRPCAuthOn []str ignoredMethods[m] = true } - config.GRPCMiddleware = append(config.GRPCMiddleware, func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + config.GRPCMiddleware = append(config.GRPCMiddleware, func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { if ignoredMethods[info.FullMethod] { return handler(ctx, req) } @@ -28,7 +28,7 @@ func SetupAuthMiddleware(config *server.Config, enabled bool, noGRPCAuthOn []str }) config.GRPCStreamMiddleware = append(config.GRPCStreamMiddleware, - func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { if ignoredMethods[info.FullMethod] { return handler(srv, ss) } @@ -55,12 +55,12 @@ var fakeHTTPAuthMiddleware = middleware.Func(func(next http.Handler) http.Handle }) }) -var fakeGRPCAuthUniaryMiddleware = func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { +var fakeGRPCAuthUniaryMiddleware = func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { ctx = user.InjectOrgID(ctx, "fake") return handler(ctx, req) } -var fakeGRPCAuthStreamMiddleware = func(srv interface{}, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { +var fakeGRPCAuthStreamMiddleware = func(srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { ctx := user.InjectOrgID(ss.Context(), "fake") return handler(srv, serverStream{ ctx: ctx, diff --git a/pkg/util/flagext/cidr.go b/pkg/util/flagext/cidr.go index 72b93b680cf..bb7a19c537e 100644 --- a/pkg/util/flagext/cidr.go +++ b/pkg/util/flagext/cidr.go @@ -46,9 +46,9 @@ func (c CIDRSliceCSV) String() string { // Set implements flag.Value func (c *CIDRSliceCSV) Set(s string) error { - parts := strings.Split(s, ",") + parts := strings.SplitSeq(s, ",") - for _, part := range parts { + for part := range parts { cidr := &CIDR{} if err := cidr.Set(part); err != nil { return errors.Wrapf(err, "cidr: %s", part) @@ -61,7 +61,7 @@ func (c *CIDRSliceCSV) Set(s string) error { } // UnmarshalYAML implements yaml.Unmarshaler. -func (c *CIDRSliceCSV) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *CIDRSliceCSV) UnmarshalYAML(unmarshal func(any) error) error { var s string if err := unmarshal(&s); err != nil { return err @@ -77,6 +77,6 @@ func (c *CIDRSliceCSV) UnmarshalYAML(unmarshal func(interface{}) error) error { } // MarshalYAML implements yaml.Marshaler. 
-func (c CIDRSliceCSV) MarshalYAML() (interface{}, error) { +func (c CIDRSliceCSV) MarshalYAML() (any, error) { return c.String(), nil } diff --git a/pkg/util/flagext/day.go b/pkg/util/flagext/day.go index 9db695c8326..30aa897af6c 100644 --- a/pkg/util/flagext/day.go +++ b/pkg/util/flagext/day.go @@ -45,7 +45,7 @@ func (v *DayValue) IsSet() bool { } // UnmarshalYAML implements yaml.Unmarshaler. -func (v *DayValue) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (v *DayValue) UnmarshalYAML(unmarshal func(any) error) error { var s string if err := unmarshal(&s); err != nil { return err @@ -54,6 +54,6 @@ func (v *DayValue) UnmarshalYAML(unmarshal func(interface{}) error) error { } // MarshalYAML implements yaml.Marshaler. -func (v DayValue) MarshalYAML() (interface{}, error) { +func (v DayValue) MarshalYAML() (any, error) { return v.Time.Time().UTC().Format("2006-01-02"), nil } diff --git a/pkg/util/flagext/secret.go b/pkg/util/flagext/secret.go index aa7101b149c..e588b4a24ac 100644 --- a/pkg/util/flagext/secret.go +++ b/pkg/util/flagext/secret.go @@ -16,7 +16,7 @@ func (v *Secret) Set(s string) error { } // UnmarshalYAML implements yaml.Unmarshaler. -func (v *Secret) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (v *Secret) UnmarshalYAML(unmarshal func(any) error) error { var s string if err := unmarshal(&s); err != nil { return err @@ -26,7 +26,7 @@ func (v *Secret) UnmarshalYAML(unmarshal func(interface{}) error) error { } // MarshalYAML implements yaml.Marshaler. -func (v Secret) MarshalYAML() (interface{}, error) { +func (v Secret) MarshalYAML() (any, error) { if len(v.Value) == 0 { return "", nil } diff --git a/pkg/util/flagext/stringslicecsv.go b/pkg/util/flagext/stringslicecsv.go index 47ccd54ca08..1f1aff6f1c3 100644 --- a/pkg/util/flagext/stringslicecsv.go +++ b/pkg/util/flagext/stringslicecsv.go @@ -18,7 +18,7 @@ func (v *StringSliceCSV) Set(s string) error { } // UnmarshalYAML implements yaml.Unmarshaler. -func (v *StringSliceCSV) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (v *StringSliceCSV) UnmarshalYAML(unmarshal func(any) error) error { var s string if err := unmarshal(&s); err != nil { return err @@ -28,6 +28,6 @@ func (v *StringSliceCSV) UnmarshalYAML(unmarshal func(interface{}) error) error } // MarshalYAML implements yaml.Marshaler. -func (v StringSliceCSV) MarshalYAML() (interface{}, error) { +func (v StringSliceCSV) MarshalYAML() (any, error) { return v.String(), nil } diff --git a/pkg/util/flagext/time.go b/pkg/util/flagext/time.go index 452857e9de8..c00d0b7d2b6 100644 --- a/pkg/util/flagext/time.go +++ b/pkg/util/flagext/time.go @@ -46,7 +46,7 @@ func (t *Time) Set(s string) error { } // UnmarshalYAML implements yaml.Unmarshaler. -func (t *Time) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (t *Time) UnmarshalYAML(unmarshal func(any) error) error { var s string if err := unmarshal(&s); err != nil { return err @@ -55,6 +55,6 @@ func (t *Time) UnmarshalYAML(unmarshal func(interface{}) error) error { } // MarshalYAML implements yaml.Marshaler. -func (t Time) MarshalYAML() (interface{}, error) { +func (t Time) MarshalYAML() (any, error) { return t.String(), nil } diff --git a/pkg/util/flagext/url.go b/pkg/util/flagext/url.go index 3b3b8303be8..338a0fb8703 100644 --- a/pkg/util/flagext/url.go +++ b/pkg/util/flagext/url.go @@ -26,7 +26,7 @@ func (v *URLValue) Set(s string) error { } // UnmarshalYAML implements yaml.Unmarshaler. 
-func (v *URLValue) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (v *URLValue) UnmarshalYAML(unmarshal func(any) error) error { var s string if err := unmarshal(&s); err != nil { return err @@ -42,7 +42,7 @@ func (v *URLValue) UnmarshalYAML(unmarshal func(interface{}) error) error { } // MarshalYAML implements yaml.Marshaler. -func (v URLValue) MarshalYAML() (interface{}, error) { +func (v URLValue) MarshalYAML() (any, error) { if v.URL == nil { return "", nil } diff --git a/pkg/util/grpcclient/backoff_retry.go b/pkg/util/grpcclient/backoff_retry.go index 525497e6bfa..c50fffeeee9 100644 --- a/pkg/util/grpcclient/backoff_retry.go +++ b/pkg/util/grpcclient/backoff_retry.go @@ -12,7 +12,7 @@ import ( // NewBackoffRetry gRPC middleware. func NewBackoffRetry(cfg backoff.Config) grpc.UnaryClientInterceptor { - return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { backoff := backoff.New(ctx, cfg) for backoff.Ongoing() { err := invoker(ctx, method, req, reply, cc, opts...) diff --git a/pkg/util/grpcclient/health_check_test.go b/pkg/util/grpcclient/health_check_test.go index 7d2b37c37c9..bede9bdab11 100644 --- a/pkg/util/grpcclient/health_check_test.go +++ b/pkg/util/grpcclient/health_check_test.go @@ -76,17 +76,17 @@ func TestNewHealthCheckService(t *testing.T) { // Generate healthcheck error and wait instance to become unhealthy hMock.err.Store(errors.New("some error")) - cortex_testutil.Poll(t, 5*time.Second, false, func() interface{} { + cortex_testutil.Poll(t, 5*time.Second, false, func() any { return instances[0].isHealthy() }) // Mark instance back to a healthy state hMock.err.Store(nil) - cortex_testutil.Poll(t, 5*time.Second, true, func() interface{} { + cortex_testutil.Poll(t, 5*time.Second, true, func() any { return instances[0].isHealthy() }) - cortex_testutil.Poll(t, i.instanceGcTimeout*2, 0, func() interface{} { + cortex_testutil.Poll(t, i.instanceGcTimeout*2, 0, func() any { return len(i.registeredInstances()) }) @@ -137,7 +137,7 @@ func TestNewHealthCheckInterceptors(t *testing.T) { require.NoError(t, i.iteration(context.Background())) require.False(t, hMock.open.Load()) - cortex_testutil.Poll(t, time.Second, true, func() interface{} { + cortex_testutil.Poll(t, time.Second, true, func() any { err := ui(context.Background(), "", struct{}{}, struct{}{}, ccUnhealthy, invoker) return errors.Is(err, unhealthyErr) || status.Code(err) == codes.Unavailable }) @@ -148,7 +148,7 @@ func TestNewHealthCheckInterceptors(t *testing.T) { // Should mark the instance back to healthy hMock.err.Store(nil) require.NoError(t, i.iteration(context.Background())) - cortex_testutil.Poll(t, time.Second, true, func() interface{} { + cortex_testutil.Poll(t, time.Second, true, func() any { return ui(context.Background(), "", struct{}{}, struct{}{}, ccUnhealthy, invoker) == nil }) } diff --git a/pkg/util/grpcclient/ratelimit.go b/pkg/util/grpcclient/ratelimit.go index 59ba3b7f08a..09ee645b22f 100644 --- a/pkg/util/grpcclient/ratelimit.go +++ b/pkg/util/grpcclient/ratelimit.go @@ -16,7 +16,7 @@ func NewRateLimiter(cfg *Config) grpc.UnaryClientInterceptor { burst = int(cfg.RateLimit) } limiter := rate.NewLimiter(rate.Limit(cfg.RateLimit), burst) - return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker 
grpc.UnaryInvoker, opts ...grpc.CallOption) error { + return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { err := limiter.Wait(ctx) if err != nil { return status.Error(codes.ResourceExhausted, err.Error()) diff --git a/pkg/util/grpcclient/ratelimit_test.go b/pkg/util/grpcclient/ratelimit_test.go index 6a8d6345b9b..a4f704b7e93 100644 --- a/pkg/util/grpcclient/ratelimit_test.go +++ b/pkg/util/grpcclient/ratelimit_test.go @@ -18,7 +18,7 @@ func TestRateLimiterFailureResultsInResourceExhaustedError(t *testing.T) { RateLimit: 0, } conn := grpc.ClientConn{} - invoker := func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error { + invoker := func(currentCtx context.Context, currentMethod string, currentReq, currentRepl any, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error { return nil } diff --git a/pkg/util/grpcclient/signing_handler.go b/pkg/util/grpcclient/signing_handler.go index d5b7803f289..c402c963aa5 100644 --- a/pkg/util/grpcclient/signing_handler.go +++ b/pkg/util/grpcclient/signing_handler.go @@ -27,7 +27,7 @@ type SignRequest interface { VerifySign(context.Context, string) (bool, error) } -func UnarySigningServerInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { +func UnarySigningServerInterceptor(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { rs, ok := req.(SignRequest) if !ok { return handler(ctx, req) @@ -58,7 +58,7 @@ func UnarySigningServerInterceptor(ctx context.Context, req interface{}, _ *grpc return handler(ctx, req) } -func UnarySigningClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { +func UnarySigningClientInterceptor(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { rs, ok := req.(SignRequest) if !ok { diff --git a/pkg/util/grpcclient/signing_handler_test.go b/pkg/util/grpcclient/signing_handler_test.go index 4682b34a45f..07193055a03 100644 --- a/pkg/util/grpcclient/signing_handler_test.go +++ b/pkg/util/grpcclient/signing_handler_test.go @@ -18,7 +18,7 @@ func TestUnarySigningHandler(t *testing.T) { w := &cortexpb.WriteRequest{} // Sign Request - err := UnarySigningClientInterceptor(ctx, "", w, w, nil, func(c context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, opts ...grpc.CallOption) error { + err := UnarySigningClientInterceptor(ctx, "", w, w, nil, func(c context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error { ctx = c return nil }) @@ -34,14 +34,14 @@ func TestUnarySigningHandler(t *testing.T) { ctx = metadata.NewIncomingContext(ctx, md) // Verify signature on the server side - _, err = UnarySigningServerInterceptor(ctx, w, nil, func(ctx context.Context, req interface{}) (interface{}, error) { + _, err = UnarySigningServerInterceptor(ctx, w, nil, func(ctx context.Context, req any) (any, error) { return nil, nil }) require.NoError(t, err) // Change user id and make sure the request signature mismatch ctx = user.InjectOrgID(ctx, "user-2") - _, err = UnarySigningServerInterceptor(ctx, w, nil, func(ctx context.Context, req interface{}) (interface{}, error) { + _, 
err = UnarySigningServerInterceptor(ctx, w, nil, func(ctx context.Context, req any) (any, error) { return nil, nil }) @@ -50,7 +50,7 @@ func TestUnarySigningHandler(t *testing.T) { // Return error when signature is not present ctx = user.InjectOrgID(context.Background(), "user-") - _, err = UnarySigningServerInterceptor(ctx, w, nil, func(ctx context.Context, req interface{}) (interface{}, error) { + _, err = UnarySigningServerInterceptor(ctx, w, nil, func(ctx context.Context, req any) (any, error) { return nil, nil }) @@ -59,7 +59,7 @@ func TestUnarySigningHandler(t *testing.T) { // Return error when multiples signatures are present md[reqSignHeaderName] = append(md[reqSignHeaderName], "sig1", "sig2") ctx = metadata.NewOutgoingContext(ctx, md) - err = UnarySigningClientInterceptor(ctx, "", w, w, nil, func(c context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, opts ...grpc.CallOption) error { + err = UnarySigningClientInterceptor(ctx, "", w, w, nil, func(c context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error { ctx = c return nil }) diff --git a/pkg/util/grpcclient/unwrap.go b/pkg/util/grpcclient/unwrap.go index be82fdcd143..ee1e0129dad 100644 --- a/pkg/util/grpcclient/unwrap.go +++ b/pkg/util/grpcclient/unwrap.go @@ -25,7 +25,7 @@ type unwrapErrorClientStream struct { grpc.ClientStream } -func (s *unwrapErrorClientStream) RecvMsg(m interface{}) error { +func (s *unwrapErrorClientStream) RecvMsg(m any) error { err := s.ClientStream.RecvMsg(m) if err != nil { // Try to unwrap the error to get the original error diff --git a/pkg/util/grpcclient/unwrap_test.go b/pkg/util/grpcclient/unwrap_test.go index b518bde12a4..ef6e31ce426 100644 --- a/pkg/util/grpcclient/unwrap_test.go +++ b/pkg/util/grpcclient/unwrap_test.go @@ -17,7 +17,7 @@ type mockClientStream struct { recvErr error } -func (m *mockClientStream) RecvMsg(msg interface{}) error { +func (m *mockClientStream) RecvMsg(msg any) error { return m.recvErr } @@ -37,7 +37,7 @@ func (m *mockClientStream) Context() context.Context { return context.Background() } -func (m *mockClientStream) SendMsg(interface{}) error { +func (m *mockClientStream) SendMsg(any) error { return nil } @@ -78,7 +78,7 @@ func TestUnwrapErrorStreamClientInterceptor(t *testing.T) { ctx := context.Background() stream, err := chainedStreamer(ctx, &grpc.StreamDesc{}, nil, "test") require.NoError(t, err) - var msg interface{} + var msg any err = stream.RecvMsg(&msg) require.Error(t, err) require.EqualError(t, err, originalErr.Error()) diff --git a/pkg/util/grpcencoding/encoding_test.go b/pkg/util/grpcencoding/encoding_test.go index 4d80ffb28ea..4c0d2389fc3 100644 --- a/pkg/util/grpcencoding/encoding_test.go +++ b/pkg/util/grpcencoding/encoding_test.go @@ -104,8 +104,7 @@ func BenchmarkCompress(b *testing.B) { for _, tc := range testCases { b.Run(tc.name, func(b *testing.B) { c := encoding.GetCompressor(tc.name) - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { w, _ := c.Compress(io.Discard) _, _ = w.Write(data) _ = w.Close() @@ -139,8 +138,7 @@ func BenchmarkDecompress(b *testing.B) { w, _ := c.Compress(&buf) _, _ = w.Write(data) w.Close() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { _, _, err := decompress(c, buf.Bytes(), 10000) require.NoError(b, err) } diff --git a/pkg/util/grpcencoding/snappy/snappy.go b/pkg/util/grpcencoding/snappy/snappy.go index fe01b4ca351..022b0683019 100644 --- a/pkg/util/grpcencoding/snappy/snappy.go +++ b/pkg/util/grpcencoding/snappy/snappy.go @@ 
-23,12 +23,12 @@ type compressor struct { func newCompressor() *compressor { c := &compressor{} c.readersPool = sync.Pool{ - New: func() interface{} { + New: func() any { return snappy.NewReader(nil) }, } c.writersPool = sync.Pool{ - New: func() interface{} { + New: func() any { return snappy.NewBufferedWriter(nil) }, } diff --git a/pkg/util/grpcencoding/snappyblock/snappyblock.go b/pkg/util/grpcencoding/snappyblock/snappyblock.go index a40e8429ddc..ce4db92912d 100644 --- a/pkg/util/grpcencoding/snappyblock/snappyblock.go +++ b/pkg/util/grpcencoding/snappyblock/snappyblock.go @@ -24,7 +24,7 @@ type compressor struct { func newCompressor() *compressor { c := &compressor{} c.readersPool = sync.Pool{ - New: func() interface{} { + New: func() any { return &reader{ pool: &c.readersPool, cbuff: bytes.NewBuffer(make([]byte, 0, 512)), @@ -32,7 +32,7 @@ func newCompressor() *compressor { }, } c.writersPool = sync.Pool{ - New: func() interface{} { + New: func() any { return &writeCloser{ pool: &c.writersPool, buff: bytes.NewBuffer(make([]byte, 0, 512)), diff --git a/pkg/util/grpcutil/naming.go b/pkg/util/grpcutil/naming.go index 8029324406f..701f702bc82 100644 --- a/pkg/util/grpcutil/naming.go +++ b/pkg/util/grpcutil/naming.go @@ -21,7 +21,7 @@ type Update struct { Addr string // Metadata is the updated metadata. It is nil if there is no metadata update. // Metadata is not required for a custom naming implementation. - Metadata interface{} + Metadata any } // Watcher watches for SRV updates on the specified target. diff --git a/pkg/util/grpcutil/util.go b/pkg/util/grpcutil/util.go index 41ab05a350b..b9e4da4afdb 100644 --- a/pkg/util/grpcutil/util.go +++ b/pkg/util/grpcutil/util.go @@ -33,14 +33,14 @@ func IsGRPCContextCanceled(err error) bool { // HTTPHeaderPropagationServerInterceptor allows for propagation of HTTP Request headers across gRPC calls - works // alongside HTTPHeaderPropagationClientInterceptor -func HTTPHeaderPropagationServerInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { +func HTTPHeaderPropagationServerInterceptor(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { ctx = extractForwardedRequestMetadataFromMetadata(ctx) h, err := handler(ctx, req) return h, err } // HTTPHeaderPropagationStreamServerInterceptor does the same as HTTPHeaderPropagationServerInterceptor but for streams -func HTTPHeaderPropagationStreamServerInterceptor(srv interface{}, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { +func HTTPHeaderPropagationStreamServerInterceptor(srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { ctx := extractForwardedRequestMetadataFromMetadata(ss.Context()) return handler(srv, wrappedServerStream{ ctx: ctx, @@ -60,7 +60,7 @@ func extractForwardedRequestMetadataFromMetadata(ctx context.Context) context.Co // HTTPHeaderPropagationClientInterceptor allows for propagation of HTTP Request headers across gRPC calls - works // alongside HTTPHeaderPropagationServerInterceptor -func HTTPHeaderPropagationClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, +func HTTPHeaderPropagationClientInterceptor(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { ctx = injectForwardedRequestMetadata(ctx) return invoker(ctx, method, req, reply, cc, opts...) 
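As an aside to the surrounding hunks, which repeat the same small set of mechanical rewrites (the spelled-out empty interface becomes any, three-clause loops become range over an int, b.ResetTimer plus a b.N loop becomes testing.B.Loop, hand-written membership and clamping loops become slices.Contains and the min builtin, strings.Split followed by range becomes strings.SplitSeq, and []byte(fmt.Sprintf(...)) becomes fmt.Appendf), here is a minimal, self-contained sketch that gathers those patterns in one place so they can be read outside the diff context. It is illustrative only and not part of this patch; all names in it (formatIDs, firstAllowed, keyvals, BenchmarkFormatIDs) are made up, and it assumes a Go 1.24 or newer toolchain, the oldest release that has both testing.B.Loop and strings.SplitSeq.

// modernize_patterns_test.go - an illustrative sketch, not part of this patch.
// Assumes Go 1.24+ (testing.B.Loop, strings.SplitSeq).
package example

import (
	"fmt"
	"slices"
	"strings"
	"testing"
)

// keyvals takes variadic any, the alias that replaces the spelled-out
// empty interface throughout the hunks above and below.
func keyvals(pairs ...any) int {
	return len(pairs)
}

// formatIDs ranges over an int and appends with fmt.Appendf instead of
// using a three-clause loop and []byte(fmt.Sprintf(...)).
func formatIDs(n int) []byte {
	var out []byte
	for i := range n { // was: for i := 0; i < n; i++
		out = fmt.Appendf(out, "id-%d\n", i)
	}
	return out
}

// firstAllowed replaces a hand-written membership loop with slices.Contains,
// a Split-then-range with strings.SplitSeq, and a manual clamp with min.
func firstAllowed(allowed []string, csv string, limit int) (string, int) {
	for part := range strings.SplitSeq(csv, ",") {
		if slices.Contains(allowed, part) {
			return part, min(limit, len(part))
		}
	}
	return "", 0
}

// BenchmarkFormatIDs uses testing.B.Loop, which handles timer resets itself,
// in place of b.ResetTimer() followed by a loop over b.N.
func BenchmarkFormatIDs(b *testing.B) {
	for b.Loop() {
		_ = formatIDs(100)
		_ = keyvals("k", 1)
	}
}

Each of these rewrites should be behavior-preserving; the practical cost is only the newer minimum Go toolchain that the iterator and benchmark helpers require.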
diff --git a/pkg/util/histogram/testutils.go b/pkg/util/histogram/testutils.go index d0c46a64995..82fdf2b0f6e 100644 --- a/pkg/util/histogram/testutils.go +++ b/pkg/util/histogram/testutils.go @@ -21,7 +21,7 @@ import ( // Adapted from Prometheus model/histogram/test_utils.go GenerateBigTestHistograms. func GenerateTestHistograms(from, step, numHistograms int) []*histogram.Histogram { var histograms []*histogram.Histogram - for i := 0; i < numHistograms; i++ { + for i := range numHistograms { v := int64(from + i*step) histograms = append(histograms, tsdbutil.GenerateTestGaugeHistogram(v)) } diff --git a/pkg/util/http.go b/pkg/util/http.go index 09b6aea9fe6..da7c40cc4db 100644 --- a/pkg/util/http.go +++ b/pkg/util/http.go @@ -46,7 +46,7 @@ func (b BasicAuth) IsEnabled() bool { } // WriteJSONResponse writes some JSON as a HTTP response. -func WriteJSONResponse(w http.ResponseWriter, v interface{}) { +func WriteJSONResponse(w http.ResponseWriter, v any) { w.Header().Set("Content-Type", "application/json") data, err := json.Marshal(v) @@ -62,7 +62,7 @@ func WriteJSONResponse(w http.ResponseWriter, v interface{}) { } // WriteYAMLResponse writes some YAML as a HTTP response. -func WriteYAMLResponse(w http.ResponseWriter, v interface{}) { +func WriteYAMLResponse(w http.ResponseWriter, v any) { // There is not standardised content-type for YAML, text/plain ensures the // YAML is displayed in the browser instead of offered as a download w.Header().Set("Content-Type", "text/plain; charset=utf-8") @@ -97,7 +97,7 @@ func WriteHTMLResponse(w http.ResponseWriter, message string) { // RenderHTTPResponse either responds with json or a rendered html page using the passed in template // by checking the Accepts header -func RenderHTTPResponse(w http.ResponseWriter, v interface{}, t *template.Template, r *http.Request) { +func RenderHTTPResponse(w http.ResponseWriter, v any, t *template.Template, r *http.Request) { accept := r.Header.Get("Accept") if strings.Contains(accept, "application/json") { WriteJSONResponse(w, v) @@ -111,7 +111,7 @@ func RenderHTTPResponse(w http.ResponseWriter, v interface{}, t *template.Templa } // StreamWriteYAMLResponseCommon stream writes data as http response -func streamWriteYAMLResponseCommon(w http.ResponseWriter, iter chan interface{}, logger log.Logger, marshalFn func(in interface{}) (out []byte, err error)) { +func streamWriteYAMLResponseCommon(w http.ResponseWriter, iter chan any, logger log.Logger, marshalFn func(in any) (out []byte, err error)) { w.Header().Set("Content-Type", "application/yaml") for v := range iter { data, err := marshalFn(v) @@ -128,12 +128,12 @@ func streamWriteYAMLResponseCommon(w http.ResponseWriter, iter chan interface{}, } // StreamWriteYAMLResponse stream writes data as http response using yaml v2 library -func StreamWriteYAMLResponse(w http.ResponseWriter, iter chan interface{}, logger log.Logger) { +func StreamWriteYAMLResponse(w http.ResponseWriter, iter chan any, logger log.Logger) { streamWriteYAMLResponseCommon(w, iter, logger, yaml.Marshal) } // StreamWriteYAMLV3Response stream writes data as http response using yaml v3 library -func StreamWriteYAMLV3Response(w http.ResponseWriter, iter chan interface{}, logger log.Logger) { +func StreamWriteYAMLV3Response(w http.ResponseWriter, iter chan any, logger log.Logger) { streamWriteYAMLResponseCommon(w, iter, logger, yamlv3.Marshal) } diff --git a/pkg/util/http_test.go b/pkg/util/http_test.go index d20f886161f..a5226ba4757 100644 --- a/pkg/util/http_test.go +++ b/pkg/util/http_test.go @@ -123,7 
+123,7 @@ func TestStreamWriteYAMLResponse(t *testing.T) { w := httptest.NewRecorder() done := make(chan struct{}) - iter := make(chan interface{}) + iter := make(chan any) go func() { util.StreamWriteYAMLResponse(w, iter, util_log.Logger) close(done) diff --git a/pkg/util/httpgrpcutil/errors.go b/pkg/util/httpgrpcutil/errors.go index b2b17eed864..c841e0047b3 100644 --- a/pkg/util/httpgrpcutil/errors.go +++ b/pkg/util/httpgrpcutil/errors.go @@ -7,7 +7,7 @@ import ( "github.com/weaveworks/common/httpgrpc" ) -func WrapHTTPGrpcError(err error, format string, args ...interface{}) error { +func WrapHTTPGrpcError(err error, format string, args ...any) error { if err == nil { return nil } @@ -19,6 +19,6 @@ func WrapHTTPGrpcError(err error, format string, args ...interface{}) error { return httpgrpc.ErrorFromHTTPResponse(&httpgrpc.HTTPResponse{ Code: resp.Code, Headers: resp.Headers, - Body: []byte(fmt.Sprintf("%s, %s", msg, err)), + Body: fmt.Appendf(nil, "%s, %s", msg, err), }) } diff --git a/pkg/util/labelset/tracker.go b/pkg/util/labelset/tracker.go index 6fa703ccb2c..2f624554ba5 100644 --- a/pkg/util/labelset/tracker.go +++ b/pkg/util/labelset/tracker.go @@ -20,7 +20,7 @@ type LabelSetTracker struct { // NewLabelSetTracker initializes a LabelSetTracker to keep track of active labelset limits. func NewLabelSetTracker() *LabelSetTracker { shards := make([]*labelSetCounterShard, 0, numMetricShards) - for i := 0; i < numMetricShards; i++ { + for range numMetricShards { shards = append(shards, &labelSetCounterShard{ RWMutex: &sync.RWMutex{}, userLabelSets: map[string]map[uint64]labels.Labels{}, @@ -53,7 +53,7 @@ func (m *LabelSetTracker) Track(userId string, hash uint64, labelSet labels.Labe // It takes a function for user to customize the metrics cleanup logic when either a user or // a specific label set is removed. If a user is removed then removeUser is set to true. func (m *LabelSetTracker) UpdateMetrics(userSet map[string]map[uint64]struct{}, deleteMetricFunc func(user, labelSetStr string, removeUser bool)) { - for i := 0; i < numMetricShards; i++ { + for i := range numMetricShards { shard := m.shards[i] shard.Lock() @@ -98,7 +98,7 @@ func (m *LabelSetTracker) labelSetExists(userId string, hash uint64, labelSet la // userExists is used for testing only to check the existence of a user. 
func (m *LabelSetTracker) userExists(userId string) bool { - for i := 0; i < numMetricShards; i++ { + for i := range numMetricShards { shard := m.shards[i] shard.RLock() defer shard.RUnlock() diff --git a/pkg/util/limiter/query_limiter_test.go b/pkg/util/limiter/query_limiter_test.go index 699adccd32e..f58fdc3339c 100644 --- a/pkg/util/limiter/query_limiter_test.go +++ b/pkg/util/limiter/query_limiter_test.go @@ -96,7 +96,7 @@ func TestQueryLimiter_AddSeriesBatch_ShouldReturnErrorOnLimitExceeded(t *testing limiter := NewQueryLimiter(10, 0, 0, 0) series := make([][]cortexpb.LabelAdapter, 0, 10) - for i := 0; i < 10; i++ { + for i := range 10 { s := []cortexpb.LabelAdapter{ { Name: labels.MetricName, @@ -160,7 +160,7 @@ func AddSeriesConcurrentBench(b *testing.B, batchSize int) { worker := func(w int) { defer wg.Done() var series []labels.Labels - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { series = append(series, labels.FromMap(map[string]string{ labels.MetricName: metricName + "_1", @@ -170,10 +170,7 @@ func AddSeriesConcurrentBench(b *testing.B, batchSize int) { for i := 0; i < len(series); i += batchSize { s := make([][]cortexpb.LabelAdapter, 0, batchSize) - j := i + batchSize - if j > len(series) { - j = len(series) - } + j := min(i+batchSize, len(series)) for k := i; k < j; k++ { s = append(s, cortexpb.FromLabelsToLabelAdapters(series[k])) } diff --git a/pkg/util/limiter/rate_limiter_test.go b/pkg/util/limiter/rate_limiter_test.go index 907624c10cc..7fa3f39195d 100644 --- a/pkg/util/limiter/rate_limiter_test.go +++ b/pkg/util/limiter/rate_limiter_test.go @@ -70,9 +70,7 @@ func BenchmarkRateLimiter_CustomMultiTenant(b *testing.B) { limiter := NewRateLimiter(strategy, 10*time.Second) now := time.Now() - b.ResetTimer() - - for i := 0; i < b.N; i++ { + for b.Loop() { limiter.AllowN(now, "test", 1) } } @@ -81,9 +79,7 @@ func BenchmarkRateLimiter_OriginalSingleTenant(b *testing.B) { limiter := rate.NewLimiter(rate.Limit(1), 1) now := time.Now() - b.ResetTimer() - - for i := 0; i < b.N; i++ { + for b.Loop() { limiter.AllowN(now, 1) } } diff --git a/pkg/util/log/log.go b/pkg/util/log/log.go index 79b93b3c576..51df578b21d 100644 --- a/pkg/util/log/log.go +++ b/pkg/util/log/log.go @@ -72,7 +72,7 @@ func newLoggerWithFormat(format logging.Format) log.Logger { return logger } -func newPrometheusLoggerFrom(logger log.Logger, logLevel logging.Level, keyvals ...interface{}) log.Logger { +func newPrometheusLoggerFrom(logger log.Logger, logLevel logging.Level, keyvals ...any) log.Logger { // Sort the logger chain to avoid expensive log.Valuer evaluation for disallowed level. // Ref: https://github.com/go-kit/log/issues/14#issuecomment-945038252 logger = log.With(logger, "ts", log.DefaultTimestampUTC) @@ -90,7 +90,7 @@ func newPrometheusLoggerFrom(logger log.Logger, logLevel logging.Level, keyvals } // Log increments the appropriate Prometheus counter depending on the log level. -func (pl *PrometheusLogger) Log(kv ...interface{}) error { +func (pl *PrometheusLogger) Log(kv ...any) error { pl.logger.Log(kv...) 
l := "unknown" for i := 1; i < len(kv); i += 2 { diff --git a/pkg/util/log/log_test.go b/pkg/util/log/log_test.go index cb4700afac8..bade053327d 100644 --- a/pkg/util/log/log_test.go +++ b/pkg/util/log/log_test.go @@ -36,7 +36,7 @@ func BenchmarkDisallowedLogLevels(b *testing.B) { require.NoError(b, cfg.LogLevel.Set("warn")) InitLogger(cfg) - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { level.Info(Logger).Log("hello", "world", "number", i) level.Debug(Logger).Log("hello", "world", "number", i) } diff --git a/pkg/util/metrics_helper.go b/pkg/util/metrics_helper.go index e5f9e7fb76b..6678c478754 100644 --- a/pkg/util/metrics_helper.go +++ b/pkg/util/metrics_helper.go @@ -20,7 +20,7 @@ import ( var ( bytesBufferPool = sync.Pool{ - New: func() interface{} { + New: func() any { return bytes.NewBuffer(nil) }, } diff --git a/pkg/util/metrics_helper_test.go b/pkg/util/metrics_helper_test.go index 712f681b6e1..85d9895389b 100644 --- a/pkg/util/metrics_helper_test.go +++ b/pkg/util/metrics_helper_test.go @@ -103,7 +103,7 @@ func BenchmarkGetMetricsWithLabelNames(b *testing.B) { // Generate metrics and add them to a metric family. mf := &dto.MetricFamily{Metric: make([]*dto.Metric, 0, numMetrics)} - for i := 0; i < numMetrics; i++ { + for i := range numMetrics { labels := []*dto.LabelPair{{ Name: proto.String("unique"), Value: proto.String(strconv.Itoa(i)), @@ -122,10 +122,9 @@ func BenchmarkGetMetricsWithLabelNames(b *testing.B) { }) } - b.ResetTimer() b.ReportAllocs() - for n := 0; n < b.N; n++ { + for b.Loop() { out := getMetricsWithLabelNames(mf, []string{"label_1", "label_2", "label_3"}) if expected := 1; len(out) != expected { @@ -471,22 +470,22 @@ func TestFloat64PrecisionStability(t *testing.T) { labelNames := []string{"label_one", "label_two"} g := promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{Name: "test_gauge"}, labelNames) - for i := 0; i < cardinality; i++ { + for i := range cardinality { g.WithLabelValues("a", strconv.Itoa(i)).Set(rand.Float64()) } c := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{Name: "test_counter"}, labelNames) - for i := 0; i < cardinality; i++ { + for i := range cardinality { c.WithLabelValues("a", strconv.Itoa(i)).Add(rand.Float64()) } h := promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{Name: "test_histogram", Buckets: []float64{0.1, 0.5, 1}}, labelNames) - for i := 0; i < cardinality; i++ { + for i := range cardinality { h.WithLabelValues("a", strconv.Itoa(i)).Observe(rand.Float64()) } s := promauto.With(reg).NewSummaryVec(prometheus.SummaryOpts{Name: "test_summary"}, labelNames) - for i := 0; i < cardinality; i++ { + for i := range cardinality { s.WithLabelValues("a", strconv.Itoa(i)).Observe(rand.Float64()) } @@ -496,7 +495,7 @@ func TestFloat64PrecisionStability(t *testing.T) { // Ensure multiple runs always return the same exact results. 
expected := map[string][]*dto.Metric{} - for run := 0; run < numRuns; run++ { + for run := range numRuns { mf := registries.BuildMetricFamiliesPerUser() gauge := collectMetrics(t, func(out chan prometheus.Metric) { @@ -1002,22 +1001,22 @@ func setupTestMetrics() *testMetrics { labelNames := []string{"label_one", "label_two"} g := promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{Name: "test_gauge"}, labelNames) - for i := 0; i < cardinality; i++ { + for i := range cardinality { g.WithLabelValues("a", strconv.Itoa(i)).Set(float64(userID)) } c := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{Name: "test_counter"}, labelNames) - for i := 0; i < cardinality; i++ { + for i := range cardinality { c.WithLabelValues("a", strconv.Itoa(i)).Add(float64(userID)) } h := promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{Name: "test_histogram", Buckets: []float64{1, 3, 5}}, labelNames) - for i := 0; i < cardinality; i++ { + for i := range cardinality { h.WithLabelValues("a", strconv.Itoa(i)).Observe(float64(userID)) } s := promauto.With(reg).NewSummaryVec(prometheus.SummaryOpts{Name: "test_summary"}, labelNames) - for i := 0; i < cardinality; i++ { + for i := range cardinality { s.WithLabelValues("a", strconv.Itoa(i)).Observe(float64(userID)) } @@ -1135,8 +1134,7 @@ func BenchmarkGetLabels_SmallSet(b *testing.B) { m.WithLabelValues("worst", "user3").Inc() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { if _, err := GetLabels(m, map[string]string{"user": "user1", "reason": "worse"}); err != nil { b.Fatal(err) } @@ -1163,9 +1161,8 @@ func BenchmarkGetLabels_MediumSet(b *testing.B) { m.WithLabelValues("worst", fmt.Sprintf("user%d", i)).Inc() } } - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { if _, err := GetLabels(m, map[string]string{"user": "user1", "reason": "worse"}); err != nil { b.Fatal(err) } diff --git a/pkg/util/middleware/grpc.go b/pkg/util/middleware/grpc.go index aee899095b0..3adea5eb9a5 100644 --- a/pkg/util/middleware/grpc.go +++ b/pkg/util/middleware/grpc.go @@ -15,7 +15,7 @@ import ( // PrometheusGRPCUnaryInstrumentation records duration of gRPC requests client side. func PrometheusGRPCUnaryInstrumentation(metric *prometheus.HistogramVec) grpc.UnaryClientInterceptor { - return func(ctx context.Context, method string, req, resp interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + return func(ctx context.Context, method string, req, resp any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { start := time.Now() err := invoker(ctx, method, req, resp, cc, opts...) 
metric.WithLabelValues(method, errorCode(err)).Observe(time.Since(start).Seconds()) @@ -46,7 +46,7 @@ type instrumentedClientStream struct { grpc.ClientStream } -func (s *instrumentedClientStream) SendMsg(m interface{}) error { +func (s *instrumentedClientStream) SendMsg(m any) error { err := s.ClientStream.SendMsg(m) if err == nil { return err @@ -61,7 +61,7 @@ func (s *instrumentedClientStream) SendMsg(m interface{}) error { return err } -func (s *instrumentedClientStream) RecvMsg(m interface{}) error { +func (s *instrumentedClientStream) RecvMsg(m any) error { err := s.ClientStream.RecvMsg(m) if err == nil { return err @@ -104,7 +104,7 @@ type instrumentedReusableClientStream struct { grpc.ClientStream } -func (s *instrumentedReusableClientStream) SendMsg(m interface{}) error { +func (s *instrumentedReusableClientStream) SendMsg(m any) error { start := time.Now() err := s.ClientStream.SendMsg(m) if err != nil && err != io.EOF { @@ -115,7 +115,7 @@ func (s *instrumentedReusableClientStream) SendMsg(m interface{}) error { return err } -func (s *instrumentedReusableClientStream) RecvMsg(m interface{}) error { +func (s *instrumentedReusableClientStream) RecvMsg(m any) error { start := time.Now() err := s.ClientStream.RecvMsg(m) if err != nil && err != io.EOF { diff --git a/pkg/util/modules/modules.go b/pkg/util/modules/modules.go index 06e7e05a1e0..bab811ffcfa 100644 --- a/pkg/util/modules/modules.go +++ b/pkg/util/modules/modules.go @@ -2,6 +2,7 @@ package modules import ( "fmt" + "slices" "sort" "github.com/go-kit/log" @@ -210,11 +211,8 @@ func (m *Manager) findInverseDependencies(mod string, mods []string) []string { result := []string(nil) for _, n := range mods { - for _, d := range m.modules[n].deps { - if d == mod { - result = append(result, n) - break - } + if slices.Contains(m.modules[n].deps, mod) { + result = append(result, n) } } diff --git a/pkg/util/priority_queue.go b/pkg/util/priority_queue.go index 8d11c550883..4bb8b1f0683 100644 --- a/pkg/util/priority_queue.go +++ b/pkg/util/priority_queue.go @@ -30,11 +30,11 @@ func (q queue) Swap(i, j int) { q[i], q[j] = q[j], q[i] } // Push and Pop use pointer receivers because they modify the slice's length, // not just its contents. 
-func (q *queue) Push(x interface{}) { +func (q *queue) Push(x any) { *q = append(*q, x.(PriorityOp)) } -func (q *queue) Pop() interface{} { +func (q *queue) Pop() any { old := *q n := len(old) x := old[n-1] diff --git a/pkg/util/push/otlp_test.go b/pkg/util/push/otlp_test.go index 84029f76661..efcdb40655b 100644 --- a/pkg/util/push/otlp_test.go +++ b/pkg/util/push/otlp_test.go @@ -637,9 +637,8 @@ func BenchmarkOTLPWriteHandlerCompression(b *testing.B) { req, err := getOTLPHttpRequest(&exportRequest, jsonContentType, "") require.NoError(b, err) - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -652,9 +651,8 @@ func BenchmarkOTLPWriteHandlerCompression(b *testing.B) { req, err := getOTLPHttpRequest(&exportRequest, jsonContentType, "gzip") require.NoError(b, err) - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -667,9 +665,8 @@ func BenchmarkOTLPWriteHandlerCompression(b *testing.B) { req, err := getOTLPHttpRequest(&exportRequest, pbContentType, "") require.NoError(b, err) - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -682,9 +679,8 @@ func BenchmarkOTLPWriteHandlerCompression(b *testing.B) { req, err := getOTLPHttpRequest(&exportRequest, pbContentType, "gzip") require.NoError(b, err) - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -892,7 +888,7 @@ func generateOTLPWriteRequestWithSeries(numSeries, samplesPerSeries, numHistogra attributes.PutStr("label2", "value2") attributes.PutStr("label3", "value3") - for i := 0; i < numSeries; i++ { + for i := range numSeries { metricName := fmt.Sprintf("series_%d", i) metricUnit := fmt.Sprintf("unit_%d", i) metricDescription := fmt.Sprintf("description_%d", i) @@ -911,7 +907,7 @@ func generateOTLPWriteRequestWithSeries(numSeries, samplesPerSeries, numHistogra metric.SetUnit(metricUnit) metric.SetEmptyGauge() - for j := 0; j < samplesPerSeries; j++ { + for j := range samplesPerSeries { v := float64(j + i) ts := time.Now().Add(time.Second * 30 * time.Duration(samplesPerSeries-j+1)) dataPoint := metric.Gauge().DataPoints().AppendEmpty() @@ -927,7 +923,7 @@ func generateOTLPWriteRequestWithSeries(numSeries, samplesPerSeries, numHistogra exemplar.SetTraceID(pcommon.TraceID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}) } - for j := 0; j < numHistogram; j++ { + for j := range numHistogram { ts := time.Now().Add(time.Second * 30 * time.Duration(numHistogram-j+1)) // Generate One Histogram histogramMetric := scopeMetric.AppendEmpty().Metrics().AppendEmpty() diff --git a/pkg/util/push/push_test.go b/pkg/util/push/push_test.go index 46cb0770f75..03d94d92dc2 100644 --- a/pkg/util/push/push_test.go +++ b/pkg/util/push/push_test.go @@ -38,7 +38,7 @@ var ( func makeV2ReqWithSeries(num int) *writev2.Request { ts := make([]writev2.TimeSeries, 0, num) symbols := []string{"", "__name__", "test_metric1", "b", "c", "baz", "qux", "d", "e", "foo", "bar", "f", "g", "h", "i", "Test gauge for test purposes", "Maybe op/sec who knows (:", "Test counter for test purposes"} - for i := 0; i < num; i++ { + for range num { ts = append(ts, writev2.TimeSeries{ LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, Metadata: writev2.Metadata{ @@ -118,10 +118,9 @@ func 
Benchmark_Handler(b *testing.B) { req, err := createPRW1HTTPRequest(seriesNum) require.NoError(b, err) - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { resp := httptest.NewRecorder() handler.ServeHTTP(resp, req) assert.Equal(b, http.StatusOK, resp.Code) @@ -133,10 +132,9 @@ func Benchmark_Handler(b *testing.B) { req, err := createPRW2HTTPRequest(seriesNum) require.NoError(b, err) - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { resp := httptest.NewRecorder() handler.ServeHTTP(resp, req) assert.Equal(b, http.StatusOK, resp.Code) @@ -153,9 +151,8 @@ func Benchmark_convertV2RequestToV1(b *testing.B) { b.Run(fmt.Sprintf("%d series", seriesNum), func(b *testing.B) { series := makeV2ReqWithSeries(seriesNum) - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { _, err := convertV2RequestToV1(series) require.NoError(b, err) } diff --git a/pkg/util/runtimeconfig/manager.go b/pkg/util/runtimeconfig/manager.go index f4bff920ec7..7479f5cdcac 100644 --- a/pkg/util/runtimeconfig/manager.go +++ b/pkg/util/runtimeconfig/manager.go @@ -24,7 +24,7 @@ import ( type BucketClientFactory func(ctx context.Context) (objstore.Bucket, error) // Loader loads the configuration from file. -type Loader func(r io.Reader) (interface{}, error) +type Loader func(r io.Reader) (any, error) // Config holds the config for an Manager instance. // It holds config related to loading per-tenant config. @@ -55,10 +55,10 @@ type Manager struct { logger log.Logger listenersMtx sync.Mutex - listeners []chan interface{} + listeners []chan any configMtx sync.RWMutex - config interface{} + config any configLoadSuccess prometheus.Gauge configHash *prometheus.GaugeVec @@ -115,8 +115,8 @@ func (om *Manager) starting(ctx context.Context) error { // // When config manager is stopped, it closes all channels to notify receivers that they will // not receive any more updates. -func (om *Manager) CreateListenerChannel(buffer int) <-chan interface{} { - ch := make(chan interface{}, buffer) +func (om *Manager) CreateListenerChannel(buffer int) <-chan any { + ch := make(chan any, buffer) om.listenersMtx.Lock() defer om.listenersMtx.Unlock() @@ -126,7 +126,7 @@ func (om *Manager) CreateListenerChannel(buffer int) <-chan interface{} { } // CloseListenerChannel removes given channel from list of channels to send notifications to and closes channel. -func (om *Manager) CloseListenerChannel(listener <-chan interface{}) { +func (om *Manager) CloseListenerChannel(listener <-chan any) { om.listenersMtx.Lock() defer om.listenersMtx.Unlock() @@ -205,13 +205,13 @@ func (om *Manager) loadConfigFromBucket(ctx context.Context) ([]byte, error) { return buf, err } -func (om *Manager) setConfig(config interface{}) { +func (om *Manager) setConfig(config any) { om.configMtx.Lock() defer om.configMtx.Unlock() om.config = config } -func (om *Manager) callListeners(newValue interface{}) { +func (om *Manager) callListeners(newValue any) { om.listenersMtx.Lock() defer om.listenersMtx.Unlock() @@ -238,7 +238,7 @@ func (om *Manager) stopping(_ error) error { } // GetConfig returns last loaded config value, possibly nil. 
-func (om *Manager) GetConfig() interface{} { +func (om *Manager) GetConfig() any { om.configMtx.RLock() defer om.configMtx.RUnlock() diff --git a/pkg/util/runtimeconfig/manager_test.go b/pkg/util/runtimeconfig/manager_test.go index d68056e0fdc..df14986a61e 100644 --- a/pkg/util/runtimeconfig/manager_test.go +++ b/pkg/util/runtimeconfig/manager_test.go @@ -39,7 +39,7 @@ type testOverrides struct { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (l *TestLimits) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (l *TestLimits) UnmarshalYAML(unmarshal func(any) error) error { if defaultTestLimits != nil { *l = *defaultTestLimits } @@ -47,7 +47,7 @@ func (l *TestLimits) UnmarshalYAML(unmarshal func(interface{}) error) error { return unmarshal((*plain)(l)) } -func testLoadOverrides(r io.Reader) (interface{}, error) { +func testLoadOverrides(r io.Reader) (any, error) { var overrides = &testOverrides{} decoder := yaml.NewDecoder(r) @@ -74,7 +74,7 @@ func newTestOverridesManagerConfig(t *testing.T, i int32) (*atomic.Int32, Config return config, Config{ ReloadPeriod: 5 * time.Second, LoadPath: tempFile.Name(), - Loader: func(_ io.Reader) (i interface{}, err error) { + Loader: func(_ io.Reader) (i any, err error) { val := int(config.Load()) return val, nil }, @@ -181,7 +181,7 @@ func TestManager_ListenerWithDefaultLimits(t *testing.T) { err = overridesManager.loadConfig(context.TODO()) require.NoError(t, err) - var newValue interface{} + var newValue any select { case newValue = <-ch: // ok diff --git a/pkg/util/runutil/runutil.go b/pkg/util/runutil/runutil.go index 421f7742760..b8303e05b3e 100644 --- a/pkg/util/runutil/runutil.go +++ b/pkg/util/runutil/runutil.go @@ -20,7 +20,7 @@ func CloseWithErrCapture(err *error, closer io.Closer, msg string) { // CloseWithLogOnErr closes an io.Closer and logs any relevant error from it wrapped with the provided format string and // args. 
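Much of the churn in pkg/util/runtimeconfig and pkg/util/runutil is the mechanical interface{} to any rewrite. Since Go 1.18, any is a predeclared alias for interface{}, so the two spellings denote the same type and the change has no behavioural effect. A short sketch of that equivalence (hypothetical names, illustration only):

package example

import "fmt"

// logKV is declared with ...any; because any is an alias for interface{},
// a []interface{} variable can hold the same slice directly.
func logKV(keyvals ...any) {
	var legacy []interface{} = keyvals // identical types, assignment is allowed
	fmt.Println(len(legacy), keyvals)
}

func main() {
	logKV("msg", "config reloaded", "version", 2) // example key/value pairs
}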
-func CloseWithLogOnErr(logger log.Logger, closer io.Closer, format string, args ...interface{}) { +func CloseWithLogOnErr(logger log.Logger, closer io.Closer, format string, args ...any) { err := closer.Close() if err == nil || errors.Is(err, os.ErrClosed) { return diff --git a/pkg/util/runutil/runutil_test.go b/pkg/util/runutil/runutil_test.go index b2392185c26..13b83b0e323 100644 --- a/pkg/util/runutil/runutil_test.go +++ b/pkg/util/runutil/runutil_test.go @@ -16,7 +16,7 @@ func TestCloseWithLogOnErr(t *testing.T) { CloseWithLogOnErr(&logger, closer, "closing failed") - assert.Equal(t, []interface{}{ + assert.Equal(t, []any{ "level", level.WarnValue(), "msg", "detected close error", "err", "closing failed: an error", }, logger.keyvals) }) @@ -49,10 +49,10 @@ func (c fakeCloser) Close() error { } type fakeLogger struct { - keyvals []interface{} + keyvals []any } -func (l *fakeLogger) Log(keyvals ...interface{}) error { +func (l *fakeLogger) Log(keyvals ...any) error { l.keyvals = keyvals return nil } diff --git a/pkg/util/services/basic_service_test.go b/pkg/util/services/basic_service_test.go index 0856376a5da..8a12268d9a3 100644 --- a/pkg/util/services/basic_service_test.go +++ b/pkg/util/services/basic_service_test.go @@ -318,8 +318,7 @@ func TestServiceName(t *testing.T) { s := NewIdleService(nil, nil).WithName("test name") require.Equal(t, "test name", DescribeService(s)) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() require.NoError(t, s.StartAsync(ctx)) // once service has started, BasicService will not allow changing the name diff --git a/pkg/util/spanlogger/noop.go b/pkg/util/spanlogger/noop.go index 8c7480ec898..72943361a7d 100644 --- a/pkg/util/spanlogger/noop.go +++ b/pkg/util/spanlogger/noop.go @@ -25,15 +25,15 @@ func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {} func (n noopSpan) Context() opentracing.SpanContext { return defaultNoopSpanContext } func (n noopSpan) SetBaggageItem(key, val string) opentracing.Span { return defaultNoopSpan } func (n noopSpan) BaggageItem(key string) string { return emptyString } -func (n noopSpan) SetTag(key string, value interface{}) opentracing.Span { return n } +func (n noopSpan) SetTag(key string, value any) opentracing.Span { return n } func (n noopSpan) LogFields(fields ...log.Field) {} -func (n noopSpan) LogKV(keyVals ...interface{}) {} +func (n noopSpan) LogKV(keyVals ...any) {} func (n noopSpan) Finish() {} func (n noopSpan) FinishWithOptions(opts opentracing.FinishOptions) {} func (n noopSpan) SetOperationName(operationName string) opentracing.Span { return n } func (n noopSpan) Tracer() opentracing.Tracer { return defaultNoopTracer } func (n noopSpan) LogEvent(event string) {} -func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {} +func (n noopSpan) LogEventWithPayload(event string, payload any) {} func (n noopSpan) Log(data opentracing.LogData) {} // StartSpan belongs to the Tracer interface. @@ -42,11 +42,11 @@ func (n noopTracer) StartSpan(operationName string, opts ...opentracing.StartSpa } // Inject belongs to the Tracer interface. -func (n noopTracer) Inject(sp opentracing.SpanContext, format interface{}, carrier interface{}) error { +func (n noopTracer) Inject(sp opentracing.SpanContext, format any, carrier any) error { return nil } // Extract belongs to the Tracer interface. 
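The basic_service_test.go hunk above replaces a manually created and cancelled context with t.Context(), added in Go 1.24. The returned context is cancelled automatically shortly before the test's cleanup functions run, which is why the defer cancel() line can be removed. A hedged sketch of the pattern in a hypothetical test (not taken from this repository):

package example

import (
	"testing"
	"time"
)

// TestUsesTestContext relies on t.Context() (Go 1.24): the context stays live
// for the duration of the test body and is cancelled automatically afterwards,
// replacing the context.WithCancel + defer cancel() boilerplate.
func TestUsesTestContext(t *testing.T) {
	ctx := t.Context()

	select {
	case <-ctx.Done():
		t.Fatal("context should not be cancelled while the test body is running")
	case <-time.After(10 * time.Millisecond):
		// expected path: cancellation happens only after the test finishes
	}
}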
-func (n noopTracer) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) { +func (n noopTracer) Extract(format any, carrier any) (opentracing.SpanContext, error) { return nil, opentracing.ErrSpanContextNotFound } diff --git a/pkg/util/spanlogger/spanlogger.go b/pkg/util/spanlogger/spanlogger.go index a96f95726f8..cde7ae045ab 100644 --- a/pkg/util/spanlogger/spanlogger.go +++ b/pkg/util/spanlogger/spanlogger.go @@ -30,14 +30,14 @@ type SpanLogger struct { } // New makes a new SpanLogger, where logs will be sent to the global logger. -func New(ctx context.Context, method string, kvps ...interface{}) (*SpanLogger, context.Context) { +func New(ctx context.Context, method string, kvps ...any) (*SpanLogger, context.Context) { return NewWithLogger(ctx, util_log.Logger, method, kvps...) } // NewWithLogger makes a new SpanLogger with a custom log.Logger to send logs // to. The provided context will have the logger attached to it and can be // retrieved with FromContext or FromContextWithFallback. -func NewWithLogger(ctx context.Context, l log.Logger, method string, kvps ...interface{}) (*SpanLogger, context.Context) { +func NewWithLogger(ctx context.Context, l log.Logger, method string, kvps ...any) (*SpanLogger, context.Context) { span, ctx := opentracing.StartSpanFromContext(ctx, method) if ids, _ := tenant.TenantIDs(ctx); len(ids) > 0 { span.SetTag(TenantIDTagName, ids) @@ -83,7 +83,7 @@ func FromContextWithFallback(ctx context.Context, fallback log.Logger) *SpanLogg // Log implements gokit's Logger interface; sends logs to underlying logger and // also puts the on the spans. -func (s *SpanLogger) Log(kvps ...interface{}) error { +func (s *SpanLogger) Log(kvps ...any) error { s.Logger.Log(kvps...) fields, err := otlog.InterleavedKVToFields(kvps...) if err != nil { diff --git a/pkg/util/spanlogger/spanlogger_test.go b/pkg/util/spanlogger/spanlogger_test.go index 86bc10e2520..f522fa6f9f5 100644 --- a/pkg/util/spanlogger/spanlogger_test.go +++ b/pkg/util/spanlogger/spanlogger_test.go @@ -25,8 +25,8 @@ func TestSpanLogger_Log(t *testing.T) { } func TestSpanLogger_CustomLogger(t *testing.T) { - var logged [][]interface{} - var logger funcLogger = func(keyvals ...interface{}) error { + var logged [][]any + var logger funcLogger = func(keyvals ...any) error { logged = append(logged, keyvals) return nil } @@ -39,7 +39,7 @@ func TestSpanLogger_CustomLogger(t *testing.T) { span = FromContextWithFallback(context.Background(), logger) _ = span.Log("msg", "fallback spanlogger") - expect := [][]interface{}{ + expect := [][]any{ {"method", "test", "msg", "original spanlogger"}, {"msg", "restored spanlogger"}, {"msg", "fallback spanlogger"}, @@ -68,8 +68,8 @@ func createSpan(ctx context.Context) *mocktracer.MockSpan { return logger.Span.(*mocktracer.MockSpan) } -type funcLogger func(keyvals ...interface{}) error +type funcLogger func(keyvals ...any) error -func (f funcLogger) Log(keyvals ...interface{}) error { +func (f funcLogger) Log(keyvals ...any) error { return f(keyvals...) } diff --git a/pkg/util/strings.go b/pkg/util/strings.go index 4965dc52a5e..4fdaded30cf 100644 --- a/pkg/util/strings.go +++ b/pkg/util/strings.go @@ -17,17 +17,6 @@ const ( internerLruCacheTTL = time.Hour * 2 ) -// StringsContain returns true if the search value is within the list of input values. -func StringsContain(values []string, search string) bool { - for _, v := range values { - if search == v { - return true - } - } - - return false -} - // StringsMap returns a map where keys are input values. 
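The strings.go hunk above removes the hand-rolled StringsContain helper; its caller in pkg/util/validation/notifications_limit_flag.go (later in this diff) now uses slices.Contains from the standard library, available since Go 1.21 and semantically equivalent for a slice of strings. A standalone sketch with assumed example values:

package example

import (
	"fmt"
	"slices"
)

func main() {
	// Example integration names; the real list lives in the Cortex validation package.
	allowed := []string{"webhook", "email", "pagerduty"}

	// slices.Contains performs the same linear scan the removed helper did,
	// but generically for any comparable element type.
	fmt.Println(slices.Contains(allowed, "email"))   // true
	fmt.Println(slices.Contains(allowed, "discord")) // false
}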
func StringsMap(values []string) map[string]bool { out := make(map[string]bool, len(values)) diff --git a/pkg/util/strings_test.go b/pkg/util/strings_test.go index de4cc28092e..ddc8df1f1a1 100644 --- a/pkg/util/strings_test.go +++ b/pkg/util/strings_test.go @@ -96,10 +96,9 @@ func BenchmarkMergeSlicesParallel(b *testing.B) { b.Run(name, func(b *testing.B) { // Run the benchmark. b.ReportAllocs() - b.ResetTimer() var r []string var err error - for i := 0; i < b.N; i++ { + for b.Loop() { if p == usingMap { r = sortUsingMap(input...) require.NotEmpty(b, r) diff --git a/pkg/util/test/poll.go b/pkg/util/test/poll.go index b88e073a86a..8759d56d2be 100644 --- a/pkg/util/test/poll.go +++ b/pkg/util/test/poll.go @@ -7,7 +7,7 @@ import ( ) // Poll repeatedly evaluates condition until we either timeout, or it succeeds. -func Poll(t testing.TB, d time.Duration, want interface{}, have func() interface{}) { +func Poll(t testing.TB, d time.Duration, want any, have func() any) { t.Helper() deadline := time.Now().Add(d) for !time.Now().After(deadline) { diff --git a/pkg/util/test_util.go b/pkg/util/test_util.go index 521a921e1cf..193e7dd9d40 100644 --- a/pkg/util/test_util.go +++ b/pkg/util/test_util.go @@ -19,10 +19,10 @@ func GenerateRandomStrings() []string { randomChar := "0123456789abcdef" randomStrings := make([]string, 0, 1000000) sb := strings.Builder{} - for i := 0; i < 1000000; i++ { + for range 1000000 { sb.Reset() sb.WriteString("pod://") - for j := 0; j < 14; j++ { + for range 14 { sb.WriteByte(randomChar[rand.Int()%len(randomChar)]) } randomStrings = append(randomStrings, sb.String()) @@ -50,20 +50,20 @@ func GenerateChunk(t require.TestingT, step time.Duration, from model.Time, poin switch pe { case chunkenc.EncXOR: - for i := 0; i < points; i++ { + for range points { appender.Append(int64(ts), float64(ts)) ts = ts.Add(step) } case chunkenc.EncHistogram: histograms := histogram_util.GenerateTestHistograms(int(from), int(step/time.Millisecond), points) - for i := 0; i < points; i++ { + for i := range points { _, _, appender, err = appender.AppendHistogram(nil, int64(ts), histograms[i], true) require.NoError(t, err) ts = ts.Add(step) } case chunkenc.EncFloatHistogram: histograms := histogram_util.GenerateTestHistograms(int(from), int(step/time.Millisecond), points) - for i := 0; i < points; i++ { + for i := range points { _, _, appender, err = appender.AppendFloatHistogram(nil, int64(ts), histograms[i].ToFloat(nil), true) require.NoError(t, err) ts = ts.Add(step) diff --git a/pkg/util/time_test.go b/pkg/util/time_test.go index 6bdeb231938..3696cbace0a 100644 --- a/pkg/util/time_test.go +++ b/pkg/util/time_test.go @@ -36,7 +36,7 @@ func TestTimeFromMillis(t *testing.T) { func TestDurationWithJitter(t *testing.T) { const numRuns = 1000 - for i := 0; i < numRuns; i++ { + for range numRuns { actual := DurationWithJitter(time.Minute, 0.5) assert.GreaterOrEqual(t, int64(actual), int64(30*time.Second)) assert.LessOrEqual(t, int64(actual), int64(90*time.Second)) @@ -50,7 +50,7 @@ func TestDurationWithJitter_ZeroInputDuration(t *testing.T) { func TestDurationWithPositiveJitter(t *testing.T) { const numRuns = 1000 - for i := 0; i < numRuns; i++ { + for range numRuns { actual := DurationWithPositiveJitter(time.Minute, 0.5) assert.GreaterOrEqual(t, int64(actual), int64(60*time.Second)) assert.LessOrEqual(t, int64(actual), int64(90*time.Second)) @@ -230,7 +230,7 @@ func TestSlottedTicker(t *testing.T) { slotSize := tc.duration.Milliseconds() / int64(tc.totalSlots) successCount := 0 - test.Poll(t, 
5*time.Second, true, func() interface{} { + test.Poll(t, 5*time.Second, true, func() any { tTime := <-ticker.C slotShiftInMs := tTime.UnixMilli() % tc.duration.Milliseconds() slot := slotShiftInMs / slotSize @@ -255,13 +255,13 @@ func TestSlottedTicker(t *testing.T) { ticker := NewSlottedTicker(infoFunc, d, 0) - test.Poll(t, 5*time.Second, true, func() interface{} { + test.Poll(t, 5*time.Second, true, func() any { tTime := <-ticker.C slotShiftInMs := tTime.UnixMilli() % d.Milliseconds() return slotShiftInMs >= 60 && slotShiftInMs <= 90 }) slotSize.Store(5) - test.Poll(t, 2*time.Second, true, func() interface{} { + test.Poll(t, 2*time.Second, true, func() any { tTime := <-ticker.C slotShiftInMs := tTime.UnixMilli() % d.Milliseconds() return slotShiftInMs >= 120 && slotShiftInMs <= 180 diff --git a/pkg/util/validation/limits.go b/pkg/util/validation/limits.go index fd077ebd18c..bbaea991d91 100644 --- a/pkg/util/validation/limits.go +++ b/pkg/util/validation/limits.go @@ -6,6 +6,7 @@ import ( "errors" "flag" "fmt" + "maps" "math" "regexp" "strings" @@ -390,7 +391,7 @@ func (l *Limits) Validate(shardByAllLabels bool, activeSeriesMetricsEnabled bool } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (l *Limits) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (l *Limits) UnmarshalYAML(unmarshal func(any) error) error { // We want to set l to the defaults and then overwrite it with the input. // To make unmarshal fill the plain data struct rather than calling UnmarshalYAML // again, we have to hide it using a type indirection. See prometheus/config. @@ -465,9 +466,7 @@ func (l *Limits) calculateMaxSeriesPerLabelSetId() error { func (l *Limits) copyNotificationIntegrationLimits(defaults NotificationRateLimitMap) { l.NotificationRateLimitPerIntegration = make(map[string]float64, len(defaults)) - for k, v := range defaults { - l.NotificationRateLimitPerIntegration[k] = v - } + maps.Copy(l.NotificationRateLimitPerIntegration, defaults) } func (l *Limits) hasQueryAttributeRegexChanged() bool { diff --git a/pkg/util/validation/limits_test.go b/pkg/util/validation/limits_test.go index 260686fdb50..7896c8ee958 100644 --- a/pkg/util/validation/limits_test.go +++ b/pkg/util/validation/limits_test.go @@ -126,7 +126,6 @@ func TestLimits_Validate(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { assert.ErrorIs(t, testData.limits.Validate(testData.shardByAllLabels, testData.activeSeriesMetricsEnabled), testData.expected) @@ -217,7 +216,7 @@ func TestLimitsTagsYamlMatchJson(t *testing.T) { n := limits.NumField() var mismatch []string - for i := 0; i < n; i++ { + for i := range n { field := limits.Field(i) // Note that we aren't requiring YAML and JSON tags to match, just that @@ -288,7 +287,7 @@ func TestLimitsAlwaysUsesPromDuration(t *testing.T) { n := limits.NumField() var badDurationType []string - for i := 0; i < n; i++ { + for i := range n { field := limits.Field(i) if field.Type == stdlibDuration { badDurationType = append(badDurationType, field.Name) diff --git a/pkg/util/validation/notifications_limit_flag.go b/pkg/util/validation/notifications_limit_flag.go index 403980cd045..d06c7e6ff62 100644 --- a/pkg/util/validation/notifications_limit_flag.go +++ b/pkg/util/validation/notifications_limit_flag.go @@ -3,10 +3,9 @@ package validation import ( "encoding/json" "fmt" + "slices" "github.com/pkg/errors" - - "github.com/cortexproject/cortex/pkg/util" ) var allowedIntegrationNames = []string{ @@ -32,7 +31,7 
@@ func (m NotificationRateLimitMap) Set(s string) error { } // UnmarshalYAML implements yaml.Unmarshaler. -func (m NotificationRateLimitMap) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (m NotificationRateLimitMap) UnmarshalYAML(unmarshal func(any) error) error { newMap := map[string]float64{} return m.updateMap(unmarshal(newMap), newMap) } @@ -43,7 +42,7 @@ func (m NotificationRateLimitMap) updateMap(unmarshalErr error, newMap map[strin } for k, v := range newMap { - if !util.StringsContain(allowedIntegrationNames, k) { + if !slices.Contains(allowedIntegrationNames, k) { return errors.Errorf("unknown integration name: %s", k) } m[k] = v @@ -52,6 +51,6 @@ func (m NotificationRateLimitMap) updateMap(unmarshalErr error, newMap map[strin } // MarshalYAML implements yaml.Marshaler. -func (m NotificationRateLimitMap) MarshalYAML() (interface{}, error) { +func (m NotificationRateLimitMap) MarshalYAML() (any, error) { return map[string]float64(m), nil } diff --git a/pkg/util/worker_pool.go b/pkg/util/worker_pool.go index 8ebaad60e24..d46d0560844 100644 --- a/pkg/util/worker_pool.go +++ b/pkg/util/worker_pool.go @@ -51,7 +51,7 @@ func NewWorkerPool(name string, numWorkers int, reg prometheus.Registerer) Async }), } - for i := 0; i < numWorkers; i++ { + for range numWorkers { go wp.run() } @@ -74,7 +74,7 @@ func (s *workerPoolExecutor) Submit(f func()) { } func (s *workerPoolExecutor) run() { - for completed := 0; completed < serverWorkerResetThreshold; completed++ { + for range serverWorkerResetThreshold { f, ok := <-s.serverWorkerChannel if !ok { return diff --git a/pkg/util/worker_pool_test.go b/pkg/util/worker_pool_test.go index f6294f5a8af..037a9ef61af 100644 --- a/pkg/util/worker_pool_test.go +++ b/pkg/util/worker_pool_test.go @@ -61,7 +61,7 @@ func TestWorkerPool_ShouldFallbackWhenAllWorkersAreBusy(t *testing.T) { // Lets lock all submited jobs m.Lock() - for i := 0; i < numberOfWorkers; i++ { + for range numberOfWorkers { workerPool.Submit(func() { defer blockerWg.Done() m.Lock() diff --git a/pkg/util/yaml.go b/pkg/util/yaml.go index bb8b4d802ab..9286cfb403d 100644 --- a/pkg/util/yaml.go +++ b/pkg/util/yaml.go @@ -4,13 +4,13 @@ import "gopkg.in/yaml.v2" // YAMLMarshalUnmarshal utility function that converts a YAML interface in a map // doing marshal and unmarshal of the parameter -func YAMLMarshalUnmarshal(in interface{}) (map[interface{}]interface{}, error) { +func YAMLMarshalUnmarshal(in any) (map[any]any, error) { yamlBytes, err := yaml.Marshal(in) if err != nil { return nil, err } - object := make(map[interface{}]interface{}) + object := make(map[any]any) if err := yaml.Unmarshal(yamlBytes, object); err != nil { return nil, err } diff --git a/tools/doc-generator/parser.go b/tools/doc-generator/parser.go index 178799eefe3..ecc8c4ba4a5 100644 --- a/tools/doc-generator/parser.go +++ b/tools/doc-generator/parser.go @@ -73,7 +73,7 @@ func parseFlags(cfg flagext.Registerer) map[uintptr]*flag.Flag { return flags } -func parseConfig(block *configBlock, cfg interface{}, flags map[uintptr]*flag.Flag, addedRootBlocks map[string]struct{}) ([]*configBlock, error) { +func parseConfig(block *configBlock, cfg any, flags map[uintptr]*flag.Flag, addedRootBlocks map[string]struct{}) ([]*configBlock, error) { blocks := []*configBlock{} // If the input block is nil it means we're generating the doc for the top-level block @@ -517,7 +517,7 @@ func parseDocTag(f reflect.StructField) map[string]string { return cfg } - for _, entry := range strings.Split(tag, "|") { + for entry := range 
strings.SplitSeq(tag, "|") { parts := strings.SplitN(entry, "=", 2) switch len(parts) { diff --git a/tools/doc-generator/writer.go b/tools/doc-generator/writer.go index 0b8d6b64bcc..c765cea6429 100644 --- a/tools/doc-generator/writer.go +++ b/tools/doc-generator/writer.go @@ -90,9 +90,9 @@ func (w *specWriter) writeComment(comment string, indent int) { } wrapped := strings.TrimSpace(wordwrap.WrapString(comment, uint(maxLineWidth-indent-2))) - lines := strings.Split(wrapped, "\n") + lines := strings.SplitSeq(wrapped, "\n") - for _, line := range lines { + for line := range lines { w.out.WriteString(pad(indent) + "# " + line + "\n") } } diff --git a/tools/query-audit/auditor.go b/tools/query-audit/auditor.go index 17ff61c3dbe..7d66b750bfe 100644 --- a/tools/query-audit/auditor.go +++ b/tools/query-audit/auditor.go @@ -36,7 +36,7 @@ func (a *Auditor) auditMatrix(x, y model.Matrix) (diff Diff, err error) { return diff, errors.Errorf("different # of series: control=%d, other=%d", len(x), len(y)) } - for i := 0; i < len(x); i++ { + for i := range x { xSeries, ySeries := x[i], y[i] if !xSeries.Metric.Equal(ySeries.Metric) { return diff, errors.Errorf("mismatched metrics: %v vs %v", xSeries.Metric, ySeries.Metric) @@ -52,7 +52,7 @@ func (a *Auditor) auditMatrix(x, y model.Matrix) (diff Diff, err error) { ) } - for j := 0; j < len(xVals); j++ { + for j := range xVals { xSample, ySample := xVals[j], yVals[j] if xSample.Timestamp != ySample.Timestamp { diff --git a/tools/querytee/proxy_endpoint.go b/tools/querytee/proxy_endpoint.go index 20083d0bd8c..1a8adf7c371 100644 --- a/tools/querytee/proxy_endpoint.go +++ b/tools/querytee/proxy_endpoint.go @@ -81,7 +81,6 @@ func (p *ProxyEndpoint) executeBackendRequests(r *http.Request, resCh chan *back wg.Add(len(p.backends)) for _, b := range p.backends { - b := b go func() { defer wg.Done() diff --git a/tools/querytee/proxy_endpoint_test.go b/tools/querytee/proxy_endpoint_test.go index 6cc2c669b14..7fa2225a1ef 100644 --- a/tools/querytee/proxy_endpoint_test.go +++ b/tools/querytee/proxy_endpoint_test.go @@ -86,7 +86,6 @@ func Test_ProxyEndpoint_waitBackendResponseForDownstream(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { endpoint := NewProxyEndpoint(testData.backends, "test", NewProxyMetrics(nil), log.NewNopLogger(), nil)
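Three further patterns recur through the tail of the diff: strings.SplitSeq (Go 1.24) yields substrings through an iterator instead of allocating an intermediate slice, ranging over an integer (Go 1.22) replaces three-clause counter loops, and maps.Copy (Go 1.21) replaces manual key-by-key map copies. A combined sketch using illustrative inputs only, not data from this repository:

package example

import (
	"fmt"
	"maps"
	"strings"
)

func main() {
	// strings.SplitSeq lazily yields each piece, avoiding the []string that
	// strings.Split would allocate up front.
	for line := range strings.SplitSeq("alpha\nbeta\ngamma", "\n") {
		fmt.Println(line)
	}

	// Ranging over an int counts from 0 to n-1, replacing
	// for i := 0; i < 3; i++ { ... }.
	for i := range 3 {
		fmt.Println("iteration", i)
	}

	// maps.Copy copies all key/value pairs from src into dst, replacing a
	// manual range-and-assign loop.
	dst := map[string]float64{}
	maps.Copy(dst, map[string]float64{"webhook": 10, "email": 20}) // example limits
	fmt.Println(dst)
}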